diff --git a/.github/workflows/github-page-builder.yml b/.github/workflows/github-page-builder.yml index bf7e9c4..dd32a0c 100644 --- a/.github/workflows/github-page-builder.yml +++ b/.github/workflows/github-page-builder.yml @@ -5,6 +5,7 @@ on: push: branches: - main + # - farms-core-integration jobs: build: @@ -12,7 +13,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 + with: + ref: farms-core-integration - name: Set up Python uses: actions/setup-python@v2 @@ -23,14 +26,29 @@ jobs: run: | sudo apt-get update sudo apt-get install -y make automake gcc g++ subversion python3-dev - pip install sphinx sphinx-rtd-theme cython numpy + python3 -m pip install setuptools wheel + python3 -m pip install --no-use-pep517 pip==23.2.1 + python3 -m pip install --no-use-pep517 distlib==0.3.8 + python3 -m pip install --no-use-pep517 setuptools==65.5.0 + python3 -m pip install --no-use-pep517 cython + python3 -m pip install --no-use-pep517 numpy + python3 -m pip install --no-use-pep517 networkx + python3 -m pip install --no-use-pep517 -r ducks/source/requirements.txt + cd ../ + git clone https://github.com/farmsim/farms_core.git + cd farms_core + python3 -m pip install --no-use-pep517 . + python3 -m pip --version + python3 -m pip list + cd ../farms_network + python setup.py build_ext --inplace - name: Build HTML run: | - cd docs + cd ducks make html cd .. - mv docs/build/html/ public/ + mv ducks/build/html/ public/ - name: Cache documentation artifacts uses: actions/upload-artifact@v4 @@ -40,7 +58,7 @@ jobs: - name: Deploy uses: peaceiris/actions-gh-pages@v4 - if: github.ref == 'refs/heads/main' + # if: github.ref == 'refs/heads/main' with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./public diff --git a/.github/workflows/python-cross-platform-installer.yml b/.github/workflows/python-cross-platform-installer.yml new file mode 100644 index 0000000..b6b2593 --- /dev/null +++ b/.github/workflows/python-cross-platform-installer.yml @@ -0,0 +1,45 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Python application + +on: + push: + branches: [ "main",] + pull_request: + branches: [ "main" ] + +permissions: + contents: read + +jobs: + build: + + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ["3.11", "3.13"] + exclude: + - os: macos-latest + python-version: "3.13" + - os: windows-latest + python-version: "3.13" + - os: ubuntu-latest + python-version: "3.13" + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + # You can test your matrix by printing the current Python version + - name: Display Python version + run: python -c "import sys; print(sys.version)" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Run installation + run: python -m pip install . 
--verbose diff --git a/.gitignore b/.gitignore index 56da4a3..b85fb08 100644 --- a/.gitignore +++ b/.gitignore @@ -36,8 +36,9 @@ build/ *.so *.dylib -# Generated cython c-files +# Generated cython c/cpp-files *.c +*.cpp # Generated cython perf html files *.html diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..c598ed3 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +network.farmsim.dev \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 9baf6e8..0000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. FARMS_NETWORK documentation master file, created by - sphinx-quickstart on Thu Nov 21 19:42:22 2019. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to FARMS_NETWORK's documentation! -=========================================== - -.. warning:: Farmers are currently busy! Documentation is work in progress!! - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - tutorials - modules - tests - contributing - - -Installation steps -------------------- - -Requirements -^^^^^^^^^^^^ - -"Code is only currently tested with Python3" - -The installation requires Cython. To install Cython, - -.. code-block:: console - - $ pip install cython - -(**NOTE** : *Depending on your system installation you may want to use pip3 instead of pip to use python3*) - -Installation -^^^^^^^^^^^^^ - -- Navigate to the root of the directory: - - .. code-block:: console - - $ cd farms_network - -- Install system wide with pip: - - .. code-block:: console - - $ pip install . - -- Install for user with pip: - - .. code-block:: console - - $ pip install . --user - -- Install in developer mode so that you don't have to install every time you make changes to the repository: - - .. code-block:: console - - $ pip install -e . - - - (You may use `--user` option with the above command if you want install only for a user): - -- To only compile the module: - - .. code-block:: console - - $ python setup.py build_ext -i - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/Makefile b/ducks/Makefile similarity index 100% rename from docs/Makefile rename to ducks/Makefile diff --git a/docs/source/conf.py b/ducks/source/conf.py similarity index 73% rename from docs/source/conf.py rename to ducks/source/conf.py index 2a2ddbb..191e901 100644 --- a/docs/source/conf.py +++ b/ducks/source/conf.py @@ -17,17 +17,31 @@ # -- Project information ----------------------------------------------------- -project = 'FARMS_NETWORK' -copyright = '2019, BioRob' +project = 'FARMS-NETWORK' +copyright = '2025, Jonathan Arreguit & Shravan Tata Ramalingasetty' author = 'FARMSIM' master_doc = 'index' +# Github +github_username = "farmsim" +github_repository = "farms_network" + # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ["sphinx.ext.autodoc",] +extensions = [ + "sphinx.ext.autodoc", + "sphinxcontrib.bibtex", + "sphinxcontrib.youtube", + "sphinx_copybutton", + "sphinx_favicon", + "sphinx_reredirects", + "sphinx_toolbox.collapse", + "sphinx_toolbox.sidebar_links", + "sphinx_toolbox.github", +] # Add any paths that contain templates here, relative to this directory. 
 templates_path = ['_templates']
@@ -35,15 +49,21 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = [
+    '_build',
+    'Thumbs.db',
+    '.DS_Store'
+]
+# Bibtex references for sphinxcontrib.bibtex
+bibtex_bibfiles = ["references.bib"]
 # -- Options for HTML output -------------------------------------------------
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-#
-html_theme = 'sphinx_rtd_theme'
+# sphinx_rtd_theme
+html_theme = "furo"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
diff --git a/ducks/source/core/data.rst b/ducks/source/core/data.rst
new file mode 100644
index 0000000..bebb24b
--- /dev/null
+++ b/ducks/source/core/data.rst
@@ -0,0 +1,8 @@
+.. automodule:: farms_network.core.data
+   :platform: Unix, Windows
+   :synopsis: Provides Node C-Structure and Python interface for nodes in a dynamical system.
+   :members:
+   :show-inheritance:
+   :private-members:
+   :special-members:
+   :noindex:
diff --git a/ducks/source/core/index.rst b/ducks/source/core/index.rst
new file mode 100644
index 0000000..16d5d4b
--- /dev/null
+++ b/ducks/source/core/index.rst
@@ -0,0 +1,8 @@
+Core
+====
+
+.. toctree::
+   :hidden:
+
+   options
+   node
diff --git a/ducks/source/core/node.rst b/ducks/source/core/node.rst
new file mode 100644
index 0000000..67d9ffb
--- /dev/null
+++ b/ducks/source/core/node.rst
@@ -0,0 +1,85 @@
+Node
+====
+
+This documentation describes the Node structure and Python interface provided in the `node_cy.pyx` and `node_cy.pxd` files.
+
+Contents
+--------
+
+- Node C Structure
+- Node Python Class
+- Functions (ODE and Output)
+
+Node C Structure
+----------------
+
+The Node C structure defines the internal state and behavior of a node in a dynamical system.
+It contains generic attributes such as state variables, inputs, and parameters.
+All nodes are many-inputs-single-output (MISO).
+The simplest case is a node with one input and one output.
+A node can have N states that are integrated by a numerical integrator over time.
+A stateless node has zero states and is useful when the node acts as a pure transfer function.
+
+
+Node Python Class
+=================
+
+.. autoclass:: farms_network.core.node.Node
+   :members:
+
+The Node class provides a high-level interface for neural network nodes.
+
+Constructor
+-----------
+
+.. code-block:: python
+
+    Node(name: str, **kwargs)
+
+**Parameters:**
+
+- ``name`` (str): Unique identifier for the node
+- ``**kwargs``: Additional configuration parameters passed to NodeCy
+
+Class Methods
+-------------
+
+from_options
+^^^^^^^^^^^^
+
+Create a node from configuration options.
+
+.. code-block:: python
+
+    @classmethod
+    def from_options(cls, node_options: NodeOptions) -> Node
+
+**Parameters:**
+
+- ``node_options`` (NodeOptions): Configuration options
+
+**Returns:**
+
+- Node: Configured node instance
+
+Examples
+--------
+
+Creating a node:
+
+.. code-block:: python
+
+    # Direct instantiation
+    node = Node("neuron1")
+
+    # From options
+    options = NodeOptions(name="neuron1")
+    node = Node.from_options(options)
+
+
+.. automodule:: farms_network.core.node_cy
+   :platform: Unix, Windows
+   :synopsis: Provides Node C-Structure for nodes in a dynamical system.
+ :members: + :show-inheritance: + :noindex: diff --git a/ducks/source/core/options.rst b/ducks/source/core/options.rst new file mode 100644 index 0000000..304993e --- /dev/null +++ b/ducks/source/core/options.rst @@ -0,0 +1,764 @@ +Configuration Options +==================== + +This module provides configuration options for neural network components and simulations. + + +NodeOptions Class +----------------- +.. autoclass:: farms_network.core.options.NodeOptions + :members: + :undoc-members: + + **Attributes:** + + - **name** (str): Name of the node. + - **model** (str): Node model type. + - **parameters** (:class:`NodeParameterOptions`): Node-specific parameters. + - **state** (:class:`NodeStateOptions`): Node state options. + +NodeParameterOptions Class +-------------------------- +.. autoclass:: farms_network.core.options.NodeParameterOptions + :members: + :undoc-members: + +NodeStateOptions Class +---------------------- +.. autoclass:: farms_network.core.options.NodeStateOptions + :members: + :undoc-members: + + **Attributes:** + + - **initial** (list of float): Initial state values. + - **names** (list of str): State variable names. + +EdgeOptions Class +----------------- +.. autoclass:: farms_network.core.options.EdgeOptions + :members: + :undoc-members: + + **Attributes:** + + - **from_node** (str): Source node of the edge. + - **to_node** (str): Target node of the edge. + - **weight** (float): Weight of the edge. + - **type** (str): Edge type (e.g., excitatory, inhibitory). + +EdgeVisualOptions Class +----------------------- +.. autoclass:: farms_network.core.options.EdgeVisualOptions + :members: + :undoc-members: + + **Attributes:** + + - **color** (list of float): Color of the edge. + - **label** (str): Label for the edge. + - **layer** (str): Layer in which the edge is displayed. +.. + + Network Options + ------------- + + .. autoclass:: farms_network.options.NetworkOptions + :members: + :undoc-members: + + The main configuration class for neural networks. Controls network structure, simulation parameters, and logging. + + **Key Attributes:** + + - ``directed``: Network directionality (default: True) + - ``multigraph``: Allow multiple edges between nodes (default: False) + - ``nodes``: List of :class:`NodeOptions` + - ``edges``: List of :class:`EdgeOptions` + - ``integration``: :class:`IntegrationOptions` for simulation settings + - ``logs``: :class:`NetworkLogOptions` for data collection + - ``random_seed``: Seed for reproducibility + + Node Options + ----------- + + .. autoclass:: farms_network.options.NodeOptions + :members: + :undoc-members: + + Base class for neuron configuration. + + **Key Attributes:** + + - ``name``: Unique identifier + - ``model``: Neural model type + - ``parameters``: Model-specific parameters + - ``state``: Initial state variables + - ``visual``: Visualization settings + - ``noise``: Noise configuration + + Available Node Models + ^^^^^^^^^^^^^^^^^^^ + + * :class:`RelayNodeOptions`: Simple signal relay + * :class:`LinearNodeOptions`: Linear transformation + * :class:`ReLUNodeOptions`: Rectified linear unit + * :class:`OscillatorNodeOptions`: Phase-amplitude oscillator + * :class:`LIDannerNodeOptions`: Leaky integrator (Danner model) + * :class:`LINaPDannerNodeOptions`: Leaky integrator with NaP + + Edge Options + ----------- + + .. autoclass:: farms_network.options.EdgeOptions + :members: + :undoc-members: + + Configuration for synaptic connections. 
+ + **Key Attributes:** + + - ``source``: Source node name + - ``target``: Target node name + - ``weight``: Connection strength + - ``type``: Synapse type (e.g., excitatory/inhibitory) + - ``parameters``: Model-specific parameters + - ``visual``: Visualization settings + + Integration Options + ----------------- + + .. autoclass:: farms_network.options.IntegrationOptions + :members: + :undoc-members: + + Numerical integration settings. + + **Key Attributes:** + + - ``timestep``: Integration step size + - ``n_iterations``: Number of iterations + - ``integrator``: Integration method (e.g., 'rk4') + - ``method``: Solver method + - ``atol``: Absolute tolerance + - ``rtol``: Relative tolerance + + Example Usage + ----------- + + Basic network configuration: + + .. code-block:: python + + from farms_network.options import NetworkOptions, LIDannerNodeOptions, EdgeOptions + + # Create network options + net_opts = NetworkOptions( + directed=True, + integration=IntegrationOptions.defaults(timestep=0.1), + logs=NetworkLogOptions(n_iterations=1000) + ) + + # Add nodes + node1 = LIDannerNodeOptions( + name="neuron1", + parameters=LIDannerNodeParameterOptions.defaults() + ) + net_opts.add_node(node1) + + # Add edges + edge = EdgeOptions( + source="neuron1", + target="neuron2", + weight=0.5, + type="excitatory" + ) + net_opts.add_edge(edge) + + + Configuration Options + ==================== + + Base Options + ----------- + + Node Options + ^^^^^^^^^^^ + + .. autoclass:: farms_network.options.NodeOptions + :members: + + Base class for all node configurations. + + **Attributes:** + + - ``name`` (str): Unique identifier + - ``model`` (str): Neural model type + - ``parameters`` (NodeParameterOptions): Model-specific parameters + - ``state`` (NodeStateOptions): Initial state variables + - ``visual`` (NodeVisualOptions): Visualization settings + - ``noise`` (NoiseOptions): Noise configuration + + Node Visual Options + ^^^^^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.NodeVisualOptions + :members: + + Visualization settings for nodes. + + **Default Values:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - position + - [0.0, 0.0, 0.0] + - 3D coordinates + * - radius + - 1.0 + - Node size + * - color + - [1.0, 0.0, 0.0] + - RGB values + * - label + - "n" + - Display label + * - layer + - "background" + - Rendering layer + * - latex + - "{}" + - LaTeX formatting + + Node State Options + ^^^^^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.NodeStateOptions + :members: + + Base class for node states. + + **Attributes:** + + - ``initial`` (List[float]): Initial state values + - ``names`` (List[str]): State variable names + + Node Models + ----------- + + Relay Node + ^^^^^^^^^ + + .. autoclass:: farms_network.options.RelayNodeOptions + :members: + + Simple signal relay node. + + Linear Node + ^^^^^^^^^^ + + .. autoclass:: farms_network.options.LinearNodeOptions + :members: + + Linear transformation node. + + **Default Parameters:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - slope + - 1.0 + - Linear transformation slope + * - bias + - 0.0 + - Constant offset + + ReLU Node + ^^^^^^^^ + + .. autoclass:: farms_network.options.ReLUNodeOptions + :members: + + Rectified Linear Unit node. + + **Default Parameters:** + + .. 
list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - gain + - 1.0 + - Amplification factor + * - sign + - 1 + - Direction (+1/-1) + * - offset + - 0.0 + - Activation threshold + + Oscillator Node + ^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.OscillatorNodeOptions + :members: + + Phase-amplitude oscillator. + + **Default Parameters:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - intrinsic_frequency + - 1.0 + - Base frequency (Hz) + * - nominal_amplitude + - 1.0 + - Base amplitude + * - amplitude_rate + - 1.0 + - Amplitude change rate + + Leaky Integrator Node (Danner) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.LIDannerNodeOptions + :members: + + Leaky integrator with Danner dynamics. + + **Default Parameters:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - c_m + - 10.0 + - Membrane capacitance (pF) + * - g_leak + - 2.8 + - Leak conductance (nS) + * - e_leak + - -60.0 + - Leak reversal potential (mV) + * - v_max + - 0.0 + - Maximum voltage (mV) + * - v_thr + - -50.0 + - Threshold voltage (mV) + * - g_syn_e + - 10.0 + - Excitatory synaptic conductance (nS) + * - g_syn_i + - 10.0 + - Inhibitory synaptic conductance (nS) + * - e_syn_e + - -10.0 + - Excitatory synaptic reversal potential (mV) + * - e_syn_i + - -75.0 + - Inhibitory synaptic reversal potential (mV) + * - tau_ch + - 5.0 + - Cholinergic time constant (ms) + + LINaP Node (Danner) + ^^^^^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.LINaPDannerNodeOptions + :members: + + Leaky integrator with persistent sodium current. + + **Default Parameters:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - c_m + - 10.0 + - Membrane capacitance (pF) + * - g_nap + - 4.5 + - NaP conductance (nS) + * - e_na + - 50.0 + - Sodium reversal potential (mV) + * - v1_2_m + - -40.0 + - Half-activation voltage (mV) + * - k_m + - -6.0 + - Activation slope + * - v1_2_h + - -45.0 + - Half-inactivation voltage (mV) + * - k_h + - 4.0 + - Inactivation slope + * - v1_2_t + - -35.0 + - Threshold half-activation (mV) + * - k_t + - 15.0 + - Threshold slope + * - g_leak + - 4.5 + - Leak conductance (nS) + * - e_leak + - -62.5 + - Leak reversal potential (mV) + * - tau_0 + - 80.0 + - Base time constant (ms) + * - tau_max + - 160.0 + - Maximum time constant (ms) + + Edge Options + ----------- + + .. autoclass:: farms_network.options.EdgeOptions + :members: + + Configuration for synaptic connections. + + **Attributes:** + + - ``source`` (str): Source node name + - ``target`` (str): Target node name + - ``weight`` (float): Connection strength + - ``type`` (str): Synapse type + - ``parameters`` (EdgeParameterOptions): Model-specific parameters + - ``visual`` (EdgeVisualOptions): Visualization settings + + Edge Visual Options + ^^^^^^^^^^^^^^^^^ + + .. autoclass:: farms_network.options.EdgeVisualOptions + :members: + + **Default Values:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - color + - [1.0, 0.0, 0.0] + - RGB values + * - alpha + - 1.0 + - Transparency + * - label + - "" + - Display label + * - layer + - "background" + - Rendering layer + * - arrowstyle + - "->" + - Arrow appearance + * - connectionstyle + - "arc3,rad=0.1" + - Connection curve + * - linewidth + - 1.5 + - Line thickness + * - edgecolor + - [0.0, 0.0, 0.0] + - Border color + + Integration Options + ----------------- + + .. 
autoclass:: farms_network.options.IntegrationOptions + :members: + + Numerical integration settings. + + **Default Values:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - timestep + - 0.001 + - Integration step (s) + * - n_iterations + - 1000 + - Number of steps + * - integrator + - "rk4" + - Integration method + * - method + - "adams" + - Solver method + * - atol + - 1e-12 + - Absolute tolerance + * - rtol + - 1e-6 + - Relative tolerance + * - max_step + - 0.0 + - Maximum step size + * - checks + - True + - Enable validation + + Network Log Options + ----------------- + + .. autoclass:: farms_network.options.NetworkLogOptions + :members: + + Logging configuration. + + **Default Values:** + + .. list-table:: + :header-rows: 1 + + * - Parameter + - Default Value + - Description + * - n_iterations + - Required + - Number of iterations to log + * - buffer_size + - n_iterations + - Log buffer size + * - nodes_all + - False + - Log all nodes + + Options + ======= + + This module contains the configuration options for neural network models, including options for nodes, edges, integration, and visualization. + + NetworkOptions Class + -------------------- + .. autoclass:: farms_network.core.options.NetworkOptions + :members: + :undoc-members: + + **Attributes:** + + - **directed** (bool): Whether the network is directed. Default is `True`. + - **multigraph** (bool): Whether the network allows multiple edges between nodes. Default is `False`. + - **graph** (dict): Graph properties (e.g., name). Default is `{"name": ""}`. + - **units** (optional): Units for the network. Default is `None`. + - **integration** (:class:`IntegrationOptions`): Options for numerical integration. Default values shown in the table below. + + IntegrationOptions Class + ------------------------ + .. autoclass:: farms_network.core.options.IntegrationOptions + :members: + :undoc-members: + + The default values for `IntegrationOptions` are as follows: + + +------------+-------------------+ + | Parameter | Default Value | + +------------+-------------------+ + | timestep | ``1e-3`` | + +------------+-------------------+ + | integrator | ``"dopri5"`` | + +------------+-------------------+ + | method | ``"adams"`` | + +------------+-------------------+ + | atol | ``1e-12`` | + +------------+-------------------+ + | rtol | ``1e-6`` | + +------------+-------------------+ + | max_step | ``0.0`` | + +------------+-------------------+ + | checks | ``True`` | + +------------+-------------------+ + + NodeOptions Class + ----------------- + .. autoclass:: farms_network.core.options.NodeOptions + :members: + :undoc-members: + + **Attributes:** + + - **name** (str): Name of the node. + - **model** (str): Node model type. + - **parameters** (:class:`NodeParameterOptions`): Node-specific parameters. + - **state** (:class:`NodeStateOptions`): Node state options. + + NodeParameterOptions Class + -------------------------- + .. 
autoclass:: farms_network.core.options.NodeParameterOptions + :members: + :undoc-members: + + The default values for `NodeParameterOptions` are as follows: + + +----------------+----------------+ + | Parameter | Default Value | + +================+================+ + | c_m | ``10.0`` pF | + +----------------+----------------+ + | g_leak | ``2.8`` nS | + +----------------+----------------+ + | e_leak | ``-60.0`` mV | + +----------------+----------------+ + | v_max | ``0.0`` mV | + +----------------+----------------+ + | v_thr | ``-50.0`` mV | + +----------------+----------------+ + | g_syn_e | ``10.0`` nS | + +----------------+----------------+ + | g_syn_i | ``10.0`` nS | + +----------------+----------------+ + | e_syn_e | ``-10.0`` mV | + +----------------+----------------+ + | e_syn_i | ``-75.0`` mV | + +----------------+----------------+ + + NodeStateOptions Class + ---------------------- + .. autoclass:: farms_network.core.options.NodeStateOptions + :members: + :undoc-members: + + **Attributes:** + + - **initial** (list of float): Initial state values. + - **names** (list of str): State variable names. + + EdgeOptions Class + ----------------- + .. autoclass:: farms_network.core.options.EdgeOptions + :members: + :undoc-members: + + **Attributes:** + + - **from_node** (str): Source node of the edge. + - **to_node** (str): Target node of the edge. + - **weight** (float): Weight of the edge. + - **type** (str): Edge type (e.g., excitatory, inhibitory). + + EdgeVisualOptions Class + ----------------------- + .. autoclass:: farms_network.core.options.EdgeVisualOptions + :members: + :undoc-members: + + **Attributes:** + + - **color** (list of float): Color of the edge. + - **label** (str): Label for the edge. + - **layer** (str): Layer in which the edge is displayed. + + LIDannerParameterOptions Class + ------------------------------ + .. autoclass:: farms_network.core.options.LIDannerParameterOptions + :members: + :undoc-members: + + The default values for `LIDannerParameterOptions` are as follows: + + +----------------+----------------+ + | Parameter | Default Value | + +================+================+ + | c_m | ``10.0`` pF | + +----------------+----------------+ + | g_leak | ``2.8`` nS | + +----------------+----------------+ + | e_leak | ``-60.0`` mV | + +----------------+----------------+ + | v_max | ``0.0`` mV | + +----------------+----------------+ + | v_thr | ``-50.0`` mV | + +----------------+----------------+ + | g_syn_e | ``10.0`` nS | + +----------------+----------------+ + | g_syn_i | ``10.0`` nS | + +----------------+----------------+ + | e_syn_e | ``-10.0`` mV | + +----------------+----------------+ + | e_syn_i | ``-75.0`` mV | + +----------------+----------------+ + + LINaPDannerParameterOptions Class + --------------------------------- + .. 
autoclass:: farms_network.core.options.LINaPDannerParameterOptions
+   :members:
+   :undoc-members:
+
+   The default values for `LINaPDannerParameterOptions` are as follows:
+
+   +----------------+----------------+
+   | Parameter      | Default Value  |
+   +================+================+
+   | c_m            | ``10.0`` pF    |
+   +----------------+----------------+
+   | g_nap          | ``4.5`` nS     |
+   +----------------+----------------+
+   | e_na           | ``50.0`` mV    |
+   +----------------+----------------+
+   | v1_2_m         | ``-40.0`` mV   |
+   +----------------+----------------+
+   | k_m            | ``-6.0``       |
+   +----------------+----------------+
+   | v1_2_h         | ``-45.0`` mV   |
+   +----------------+----------------+
+   | k_h            | ``4.0``        |
+   +----------------+----------------+
+   | v1_2_t         | ``-35.0`` mV   |
+   +----------------+----------------+
+   | k_t            | ``15.0``       |
+   +----------------+----------------+
+   | g_leak         | ``4.5`` nS     |
+   +----------------+----------------+
+   | e_leak         | ``-62.5`` mV   |
+   +----------------+----------------+
+   | tau_0          | ``80.0`` ms    |
+   +----------------+----------------+
+   | tau_max        | ``160.0`` ms   |
+   +----------------+----------------+
+   | v_max          | ``0.0`` mV     |
+   +----------------+----------------+
+   | v_thr          | ``-50.0`` mV   |
+   +----------------+----------------+
+   | g_syn_e        | ``10.0`` nS    |
+   +----------------+----------------+
+   | g_syn_i        | ``10.0`` nS    |
+   +----------------+----------------+
+   | e_syn_e        | ``-10.0`` mV   |
+   +----------------+----------------+
+   | e_syn_i        | ``-75.0`` mV   |
+   +----------------+----------------+
diff --git a/ducks/source/index.rst b/ducks/source/index.rst
new file mode 100644
index 0000000..62cc0dc
--- /dev/null
+++ b/ducks/source/index.rst
@@ -0,0 +1,35 @@
+.. FARMS_NETWORK documentation master file, created by
+   sphinx-quickstart on Thu Nov 21 19:42:22 2019.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+===========================================
+ Welcome to FARMS Network's documentation!
+===========================================
+
+.. warning:: Farmers are currently busy! Documentation is work in progress!!
+
+A neural network simulation library for simulating sparse neural networks, written primarily in Python with the efficient computations handled through Cython. FARMS Network provides commonly used neural models for locomotion circuits.
+
+=============
+ Introduction
+=============
+
+Contents
+
+.. toctree::
+
+   introduction/index.rst
+   core/index.rst
+   models/index.rst
+
+.. sidebar-links::
+   :github:
+
+====================
+ Indices and tables
+====================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/ducks/source/introduction/concepts.rst b/ducks/source/introduction/concepts.rst
new file mode 100644
index 0000000..c212118
--- /dev/null
+++ b/ducks/source/introduction/concepts.rst
@@ -0,0 +1,123 @@
+Core Concepts
+=============
+
+Network Components
+------------------
+
+Node
+^^^^
+Basic unit representing a computational element; a neuron is one form of a node. A node receives n inputs and produces a single output.
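+
+As a quick illustration, a node is usually declared through its options class. The following is a minimal sketch based on the options API used in ``examples/beer95/run.py`` (a leaky-integrator node); it is illustrative rather than a complete listing:
+
+.. code-block:: python
+
+    from farms_network.core import options
+
+    node = options.LeakyIntegratorNodeOptions(
+        name="n0",
+        parameters=options.LeakyIntegratorParameterOptions.defaults(
+            tau=0.1, bias=-2.75, D=1.0,
+        ),
+        visual=options.NodeVisualOptions(label="0", color=[1.0, 0.0, 0.0]),
+        state=options.LeakyIntegratorStateOptions.from_kwargs(m=0.0),
+        noise=None,
+    )
+
+The resulting options object is added to a network container with ``NetworkOptions.add_node`` (see Getting Started and the bundled examples).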
+
+Properties:
+
+* Dynamics (ODE)/computation
+* Input integration
+* Output generation
+
+Available dynamics models:
+ * Base
+ * Relay
+ * Linear
+ * ReLU
+ * Oscillator
+ * Hopf oscillator
+ * Morphed oscillator
+ * Matsuoka oscillator
+ * FitzHugh-Nagumo oscillator
+ * Morris-Lecar oscillator
+ * Leaky integrator
+ * Leaky integrator (Danner)
+ * Leaky integrator with persistent sodium (Danner)
+ * Leaky integrator (Daun)
+ * Hodgkin-Huxley (Daun)
+ * Izhikevich
+ * Hodgkin-Huxley
+ * Custom (user-defined)
+
+Edge
+^^^^
+Connection between any two nodes. Characteristics:
+
+* Source node
+* Target node
+* Weight (connection strength)
+* Type (excitatory/inhibitory/cholinergic)
+* Parameters
+
+Network
+^^^^^^^
+The primary container for the circuit simulation. Networks manage:
+
+* Component organization and connectivity
+* Simulation configuration
+* State tracking and data collection
+* Input/output handling
+
+Simulation Elements
+-------------------
+
+Time Management
+^^^^^^^^^^^^^^^
+* ``dt``: Integration time step
+* ``simulation_duration``: Total simulation time
+* ``sampling_rate``: Data collection frequency
+
+State Variables
+^^^^^^^^^^^^^^^
+* Membrane potentials
+* Synaptic currents
+* Ionic concentrations
+* Firing rates
+* Custom variables
+
+Input Handling
+^^^^^^^^^^^^^^
+* External current injection
+* Spike trains
+* Continuous signals
+* Stochastic inputs
+
+Output and Analysis
+^^^^^^^^^^^^^^^^^^^
+* Membrane potential traces
+* Spike times
+* Population activity
+* Network statistics
+* Custom metrics
+
+Configuration
+-------------
+
+Network Setup
+^^^^^^^^^^^^^
+.. code-block:: python
+
+    net.configure(
+        dt=0.1,                        # Time step (ms)
+        simulation_duration=1000.0,    # Duration (ms)
+        sampling_rate=1.0,             # Recording frequency (ms)
+        backend='cpu'                  # Backend
+    )
+
+Node Configuration
+^^^^^^^^^^^^^^^^^^
+.. code-block:: python
+
+    neuron.configure(
+        threshold=-55.0,          # Firing threshold (mV)
+        reset_potential=-70.0,    # Reset potential (mV)
+        refractory_period=2.0,    # Refractory period (ms)
+        capacitance=1.0,          # Membrane capacitance (pF)
+        leak_conductance=0.1      # Leak conductance (nS)
+    )
+
+Edge Configuration
+^^^^^^^^^^^^^^^^^^
+.. code-block:: python
+
+    edge.configure(
+        delay=1.0,             # Transmission delay (ms)
+        plasticity='stdp',     # Plasticity rule
+        learning_rate=0.01     # Learning rate
+    )
diff --git a/ducks/source/introduction/getting-started.rst b/ducks/source/introduction/getting-started.rst
new file mode 100644
index 0000000..8f494ab
--- /dev/null
+++ b/ducks/source/introduction/getting-started.rst
@@ -0,0 +1,52 @@
+Getting Started
+===============
+
+This guide demonstrates basic usage of farms_network through a simple example.
+
+Basic Example
+-------------
+
+Let's create a simple neural network that simulates a basic reflex circuit:
+
+.. code-block:: python
+
+    from farms_network import Network, Node, Edge
+    import numpy as np
+
+    # Create network
+    net = Network('reflex_circuit')
+
+    # Add neurons
+    sensory = Node('sensory', dynamics='leaky_integrator')
+    inter = Node('interneuron', dynamics='leaky_integrator')
+    motor = Node('motor', dynamics='leaky_integrator')
+
+    # Add neurons to network
+    net.add_neurons([sensory, inter, motor])
+
+    # Create synaptic connections (edges)
+    edge1 = Edge('sensory_to_inter', sensory, inter, weight=0.5)
+    edge2 = Edge('inter_to_motor', inter, motor, weight=0.8)
+
+    # Add edges to network
+    net.add_edges([edge1, edge2])
+
+    # Configure simulation parameters
+    net.configure(dt=0.1, simulation_duration=10.0)
+
+    # Add input stimulus
+    stimulus = np.sin(np.linspace(0, 10, 100))
+    net.set_external_input(sensory, stimulus)
+
+    # Run simulation
+    results = net.simulate()
+
+    # Plot results
+    net.plot_results(results)
+
+Key Concepts
+------------
+
+* **Network**: The main container for your neural circuit
+* **Node**: Represents a neural unit with specific dynamics
+* **Edge**: Defines a connection (synapse) between two nodes
diff --git a/ducks/source/introduction/index.rst b/ducks/source/introduction/index.rst
new file mode 100644
index 0000000..ae9472d
--- /dev/null
+++ b/ducks/source/introduction/index.rst
@@ -0,0 +1,8 @@
+Introduction
+============
+
+.. toctree::
+
+   installation
+   getting-started
+   concepts
diff --git a/ducks/source/introduction/installation.rst b/ducks/source/introduction/installation.rst
new file mode 100644
index 0000000..f97427c
--- /dev/null
+++ b/ducks/source/introduction/installation.rst
@@ -0,0 +1,101 @@
+==============
+ Installation
+==============
+
+The code is currently tested with Python 3 (versions 3.11 and 3.13 in the CI matrix) on the ubuntu-latest, macos-latest and windows-latest GitHub Actions runners.
+We recommend using a virtual environment to avoid conflicts with other Python packages.
+This guide will help you install farms_network and its dependencies.
+
+Prerequisites
+-------------
+
+Before installing farms_network, ensure you have the following prerequisites:
+
+* Python 3.11 or higher
+* pip (Python package installer)
+* A C++ compiler (for building extensions)
+
+
+Basic Installation
+------------------
+
+You can install farms_network using pip:
+
+.. code-block:: bash
+
+    pip install git+https://github.com/farmsim/farms_network.git
+
+(You may use the `--user` option with the above command if you want to install only for the current user.)
+
+(**NOTE**: *Depending on your system installation, you may need to use pip3 instead of pip to target Python 3.*)
+
+Development Installation
+------------------------
+
+For development purposes, you can install farms_network from source:
+
+.. code-block:: bash
+
+    git clone https://github.com/farmsim/farms_network.git
+    cd farms_network
+    pip install -e .[dev]
+
+The `[dev]` extra will install additional dependencies needed for development.
+
+Optional Dependencies
+---------------------
+
+farms_network has several optional dependencies for extended functionality:
+
+..
+   * **GPU Support**: For GPU acceleration
+
+     .. code-block:: bash
+
+        pip install farms_network[gpu]
+
+* **Visualization**: For advanced visualization features
+
+  .. code-block:: bash
+
+     pip install farms_network[viz]
+
+
+Updating farms_network
+----------------------
+
+To update an existing installation to the latest version:
+
+.. code-block:: bash
+
+    pip install --upgrade farms_network
+
+To update to a specific version:
+
+.. 
code-block:: bash + + pip install --upgrade farms_network== + +Note: After updating, it's recommended to restart any running applications or kernels using farms_network. + +Troubleshooting +-------------- + +Common Installation Issues +^^^^^^^^^^^^^^^^^^^^^^^^ + +1. **Compiler errors**: Ensure you have a compatible C++ compiler installed +2. **Missing dependencies**: Try installing the package with all optional dependencies: + + .. code-block:: bash + + pip install farms_network[all] + +3. **Version conflicts**: If you encounter dependency conflicts, try creating a fresh virtual environment: + + .. code-block:: bash + + python -m venv farms_env + source farms_env/bin/activate # On Unix + # or + farms_env\Scripts\activate # On Windows diff --git a/ducks/source/introduction/overview.rst b/ducks/source/introduction/overview.rst new file mode 100644 index 0000000..802d341 --- /dev/null +++ b/ducks/source/introduction/overview.rst @@ -0,0 +1,2 @@ +Overview +======== diff --git a/ducks/source/models/index.rst b/ducks/source/models/index.rst new file mode 100644 index 0000000..1d56d9d --- /dev/null +++ b/ducks/source/models/index.rst @@ -0,0 +1,7 @@ +Models +====== + +.. toctree:: + :hidden: + + li_danner diff --git a/ducks/source/models/li_danner.rst b/ducks/source/models/li_danner.rst new file mode 100644 index 0000000..db06627 --- /dev/null +++ b/ducks/source/models/li_danner.rst @@ -0,0 +1,15 @@ +Leaky Integrator Danner +======================= + +.. toctree:: + :hidden: + +.. automodule:: farms_network.models.li_danner + :members: + :show-inheritance: + :noindex: + +.. automodule:: farms_network.models.li_nap_danner + :members: + :show-inheritance: + :noindex: diff --git a/ducks/source/references.bib b/ducks/source/references.bib new file mode 100644 index 0000000..1d55e83 --- /dev/null +++ b/ducks/source/references.bib @@ -0,0 +1,18 @@ +@misc{arreguit-FARMS-2024, + title = {{{FARMS}}: {{Framework}} for {{Animal}} and {{Robot Modeling}} and {{Simulation}}}, + shorttitle = {{{FARMS}}}, + author = {Arreguit, Jonathan and Ramalingasetty, Shravan Tata and Ijspeert, Auke}, + year = {2024}, + month = mar, + primaryclass = {New Results}, + pages = {2023.09.25.559130}, + publisher = {bioRxiv}, + doi = {10.1101/2023.09.25.559130}, + url = {https://www.biorxiv.org/content/10.1101/2023.09.25.559130v2}, + urldate = {2024-03-14}, + abstract = {The study of animal locomotion and neuromechanical control offers valuable insights for advancing research in neuroscience, biomechanics, and robotics. We have developed FARMS (Framework for Animal and Robot Modeling and Simulation), an open-source, interdisciplinary framework, designed to facilitate access to neuromechanical simulations for modeling, simulation, and analysis of animal locomotion and bio-inspired robotic systems. By providing an accessible and user-friendly platform, FARMS aims to lower the barriers for researchers to explore the complex interactions between the nervous system, musculoskeletal structures, and their environment. Integrating the MuJoCo physics engine in a modular manner, FARMS enables realistic simulations and fosters collaboration among neuroscientists, biologists, and roboticists. FARMS has already been extensively used to study locomotion in animals such as mice, drosophila, fish, salamanders, and centipedes, serving as a platform to investigate the role of central pattern generators and sensory feedback. 
This article provides an overview of the FARMS framework, discusses its interdisciplinary approach, showcases its versatility through specific case studies, and highlights its effectiveness in advancing our understanding of locomotion. In particular, we show how we used FARMS to study amphibious locomotion by presenting experimental demonstrations across morphologies and environments based on neural controllers with central pattern generators and sensory feedback circuits models. Overall, the goal of FARMS is to contribute to a deeper understanding of animal locomotion, the development of innovative bio-inspired robotic systems, and promote accessibility in neuromechanical research.}, + archiveprefix = {bioRxiv}, + chapter = {New Results}, + copyright = {{\copyright} 2024, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution-NonCommercial 4.0 International), CC BY-NC 4.0, as described at http://creativecommons.org/licenses/by-nc/4.0/}, + langid = {english} +} diff --git a/ducks/source/requirements.txt b/ducks/source/requirements.txt new file mode 100644 index 0000000..bb3e68a --- /dev/null +++ b/ducks/source/requirements.txt @@ -0,0 +1,21 @@ +Sphinx==7.3.7 +furo==2024.5.6 +sphinx-autodoc-typehints==2.1.0 +sphinx-basic-ng==1.0.0b2 +sphinx-copybutton==0.5.2 +sphinx-favicon==1.0.1 +sphinx-jinja2-compat==0.2.0.post1 +sphinx-prompt==1.8.0 +sphinx-reredirects==0.1.3 +sphinx-rtd-theme==2.0.0 +sphinx-tabs==3.4.5 +sphinx-toolbox==3.5.0 +sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-bibtex==2.6.2 +sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-jquery==4.1 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-youtube==1.4.1 diff --git a/examples/beer95/run.py b/examples/beer95/run.py new file mode 100644 index 0000000..7ecad12 --- /dev/null +++ b/examples/beer95/run.py @@ -0,0 +1,201 @@ +"""Leaky integrator + +[1] Beer RD. 1995. On the Dynamics of Small Continuous-Time Recurrent Neural Networks. +Adaptive Behavior 3:469–509. 
doi:10.1177/105971239500300405 + +""" + + +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +import seaborn as sns +from farms_core import pylog +from farms_core.utils import profile +from farms_network.core import options +from farms_network.core.data import NetworkData +from farms_network.core.network import Network +from tqdm import tqdm + +plt.rcParams['text.usetex'] = False + + +def join_strings(strings): + return "_".join(strings) + + +def generate_network(iterations=20000): + """ Generate network """ + + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "beer95"}, + integration=options.IntegrationOptions.defaults( + n_iterations=iterations, + timestep=float(1e-3), + ), + logs=options.NetworkLogOptions( + n_iterations=iterations, + buffer_size=iterations, + ) + ) + + # Generate rhythm centers + n_neurons = 2 + + # Neuron + # Create an neuron for each joint + num_neurons = 2 + neuron_names = [f'n{num}' for num in range(num_neurons)] + biases = [-2.75, -1.75] + positions = [(0.0, -5.0), (0.0, 5.0)] + for j, neuron_name in enumerate(neuron_names): + network_options.add_node( + options.LeakyIntegratorNodeOptions( + name=neuron_name, + parameters=options.LeakyIntegratorParameterOptions.defaults( + tau=0.1, + bias=biases[j], + D=1.0, + ), + visual=options.NodeVisualOptions( + label=f"{j}", color=[1.0, 0.0, 0.0] + ), + state=options.LeakyIntegratorStateOptions.from_kwargs( + m=np.random.uniform(0.0, 1.0), + ), + noise=None, + ) + ) + + # Connect edges + connection_matrix = np.asarray( + [ + [4.5, 1,], + [-1, 4.5,], + ] + ).T + + for i, j in zip(*np.nonzero(connection_matrix)): + weight = connection_matrix[i, j] + print(f"{neuron_names[i]}-->{neuron_names[j]}={weight}") + network_options.add_edge( + options.EdgeOptions( + source=neuron_names[i], + target=neuron_names[j], + weight=weight, + type="excitatory" if weight > 0.0 else "inhibitory", + visual=options.EdgeVisualOptions(), + ) + ) + + network_options.save("/tmp/beer95.yaml") + + network = Network.from_options(network_options) + network.setup_integrator(network_options) + data = network.data + + # # Integrate + states = np.ones((iterations+1, len(data.states.array)))*0.0 + outputs = np.ones((iterations, len(data.outputs.array)))*1.0 + + # for index, node in enumerate(network_options.nodes): + # print(index, node.name) + # network.data.external_inputs.array[:] = np.ones((1,))*(iteration/iterations)*1.0 + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + # network.step(network.ode, iteration*1e-3, network.data.states.array) + # network.step() + # states[iteration+1, :] = network.data.states.array + network.step() + network.data.times.array[iteration] = iteration*1e-3 + + # network.data.to_file("/tmp/network.h5") + plt.figure() + for j in range(n_neurons): + plt.plot( + np.array(network.data.times.array), + np.asarray(network.data.nodes[j].output.array), + label=f"{j}" + ) + plt.legend() + + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + link="edges", + name="name", + source="source", + target="target" + ) + plt.figure() + node_positions = nx.circular_layout(graph) + node_positions = nx.spring_layout(graph) + for index, node in enumerate(network_options.nodes): + node.visual.position[:2] = node_positions[node.name] + + network_options.save("/tmp/network_options.yaml") + + _ = nx.draw_networkx_nodes( + graph, + pos=node_positions, + node_color=[data["visual"]["color"] for node, data in 
graph.nodes.items()], + alpha=0.25, + edgecolors='k', + linewidths=2.0, + ) + nx.draw_networkx_labels( + graph, + pos=node_positions, + labels={node: data["visual"]["label"] for node, data in graph.nodes.items()}, + font_size=11.0, + font_weight='bold', + font_family='sans-serif', + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos=node_positions, + edge_color=[ + [0.0, 1.0, 0.0] if data["type"] == "excitatory" else [1.0, 0.0, 0.0] + for edge, data in graph.edges.items() + ], + width=1., + arrowsize=10, + style='dashed', + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle="arc3,rad=-0.2", + ) + plt.figure() + sparse_array = nx.to_scipy_sparse_array(graph) + sns.heatmap( + sparse_array.todense(), cbar=False, square=True, + linewidths=0.5, + annot=True + ) + plt.show() + + # generate_tikz_figure( + # graph, + # paths.get_project_data_path().joinpath("templates", "network",), + # "tikz-full-network.tex", + # paths.get_project_images_path().joinpath("quadruped_network.tex") + # ) + + +def main(): + """Main.""" + + # Generate the network + profile.profile(generate_network) + + # Run the network + # run_network() + + +if __name__ == "__main__": + main() diff --git a/examples/ijspeert07/run.py b/examples/ijspeert07/run.py new file mode 100644 index 0000000..699e636 --- /dev/null +++ b/examples/ijspeert07/run.py @@ -0,0 +1,326 @@ +""" Generate and reproduce Ijspeert 07 Science paper +DOI: 10.1126/science.1138353 """ + + +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +import seaborn as sns +from farms_core.utils import profile +from farms_network.core import options +from farms_core.io.yaml import read_yaml +from farms_network.core.data import NetworkData +from farms_network.core.network import Network +from tqdm import tqdm +from farms_network.numeric.integrators_cy import RK4Solver +from scipy.integrate import ode, RK45, RK23 + +plt.rcParams['text.usetex'] = False + + +def join_strings(strings): + return "_".join(strings) + + +def oscillator_chain(network_options, n_oscillators, name_prefix, **kwargs): + """ Create a chain of n-oscillators. 
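+
+    Keyword arguments read from ``kwargs`` (defaults in parentheses):
+    ``intrinsic_frequency`` (1.0), ``nominal_amplitude`` (1.0),
+    ``amplitude_rate`` (20.0), ``origin`` ([0, 0]), ``axial_phi`` (-pi/2)
+    and ``axial_w`` (1e2).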
""" + # Define a network graph + + oscillator_names = [ + "{}_{}".format(name_prefix, n) + for n in range(n_oscillators) + ] + # Oscillators + intrinsic_frequency = kwargs.get('intrinsic_frequency', 1.0) + nominal_amplitude = kwargs.get('nominal_amplitude', 1.0) + amplitude_rate = kwargs.get('amplitude_rate', 20.0) + + origin = kwargs.get('origin', [0, 0]) + for j, osc in enumerate(oscillator_names): + network_options.add_node( + options.OscillatorNodeOptions( + name=osc, + parameters=options.OscillatorNodeParameterOptions.defaults( + intrinsic_frequency=intrinsic_frequency, + nominal_amplitude=nominal_amplitude, + amplitude_rate=amplitude_rate, + ), + visual=options.NodeVisualOptions( + label=f"{j}", color=[1.0, 0.0, 0.0] + ), + state=options.OscillatorStateOptions( + initial=[ + np.random.uniform(-np.pi, np.pi), + np.random.uniform(0, 1), + np.random.uniform(0, 1) + ] + ), + noise=None, + ) + ) + # Connect + phase_diff = kwargs.get('axial_phi', -np.pi/2) + weight = kwargs.get('axial_w', 1e2) + connections = np.vstack( + (np.arange(n_oscillators), + np.roll(np.arange(n_oscillators), -1)))[:, :-1] + for j in np.arange(n_oscillators-1): + network_options.add_edge( + options.OscillatorEdgeOptions( + source=oscillator_names[connections[0, j]], + target=oscillator_names[connections[1, j]], + weight=weight, + type="excitatory", + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=-1*phase_diff + ), + visual=options.EdgeVisualOptions(), + ) + ) + + network_options.add_edge( + options.OscillatorEdgeOptions( + source=oscillator_names[connections[1, j]], + target=oscillator_names[connections[0, j]], + weight=weight, + type="excitatory", + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=phase_diff + ), + visual=options.EdgeVisualOptions(), + ) + ) + return network_options + + +def oscillator_double_chain(network_options, n_oscillators, **kwargs): + """ Create a double chain of n-oscillators. 
""" + kwargs['origin'] = [-0.05, 0] + network_options = oscillator_chain(network_options, n_oscillators, 'left', **kwargs) + kwargs['origin'] = [0.05, 0] + network_options = oscillator_chain(network_options, n_oscillators, 'right', **kwargs) + + # Connect double chain + phase_diff = kwargs.get('anti_phi', 2*np.pi/3) + weight = kwargs.get('anti_w', 1e2) + for n in range(n_oscillators): + network_options.add_edge( + options.OscillatorEdgeOptions( + source=f'left_{n}', + target=f'right_{n}', + weight=weight, + type="excitatory", + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=phase_diff + ), + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.OscillatorEdgeOptions( + source=f'right_{n}', + target=f'left_{n}', + weight=weight, + type="excitatory", + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=-1*phase_diff + ), + visual=options.EdgeVisualOptions(), + ) + ) + return network_options + + +def generate_network(iterations=10000): + """ Generate network """ + + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "ijspeert07"}, + integration=options.IntegrationOptions.defaults( + n_iterations=iterations, + timestep=float(1e-3), + ), + logs=options.NetworkLogOptions( + buffer_size=iterations, + ) + ) + + # Generate rhythm centers + n_oscillators = 9 + network_options = oscillator_double_chain(network_options, n_oscillators) + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + edges="edges", + name="name", + source="source", + target="target" + ) + node_positions = nx.spring_layout(graph) + for index, node in enumerate(network_options.nodes): + node.visual.position[:2] = node_positions[node.name] + return network_options + + +def run_network(network_options: options.NetworkOptions): + """ Run network """ + + network = Network.from_options(network_options) + iterations = network_options.integration.n_iterations + timestep = network_options.integration.timestep + network.setup_integrator() + + # Setup integrators + rk4solver = RK4Solver(network.nstates, timestep) + + sc_integrator = RK45( + network.get_ode_func(), + t0=0.0, + y0=np.zeros(network.nstates,), + t_bound=iterations*timestep, + # max_step=timestep, + # first_step=timestep, + # rtol=1e-2, + # atol=1e2, + ) + + integrator = ode(network.get_ode_func()).set_integrator( + 'dopri5', + max_step=timestep, # your RK4 step + nsteps=10000, + ) + + nnodes = len(network_options.nodes) + integrator.set_initial_value(np.zeros(network.nstates,), 0.0) + + # Integrate + states = np.ones((iterations+1, network.nstates))*1.0 + outputs = np.ones((iterations, network.nnodes))*1.0 + + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + # network.data.times.array[iteration] = iteration*timestep + + # integrator.set_initial_value(integrator.y, integrator.t) + # integrator.integrate(integrator.t+timestep) + + # # sc_integrator.step() + + # # rk4solver.step(network._network_cy, iteration*timestep, network.data.states.array) + + # network._network_cy.update_logs(network._network_cy.iteration) + # network._network_cy.iteration += 1 + network.step(iteration*timestep) + network.update_logs(iteration*timestep) + + names = network.data.nodes.names() + plt.figure() + for j in range(int(network.nnodes/2)): + plt.fill_between( + np.array(network.log.times.array), + 2*j + (1 + np.sin(np.array(network.log.outputs.array[:, j]))), + 2*j, + alpha=0.2, + lw=1.0, + label=names[j] + ) + 
plt.plot( + np.array(network.log.times.array), + 2*j + (1 + np.sin(network.log.outputs.array[:, j])), + # label=f"{j}" + label="_nolegend_" + ) + for j in range(int(network.nnodes/2), int(network.nnodes)): + k = j - int(network.nnodes/2) + plt.fill_between( + np.array(network.log.times.array), + 2*k + (1 + np.sin(np.array(network.log.outputs.array[:, j]))), + 2*k, + alpha=0.2, + lw=1.0, + label=names[j] + ) + plt.plot( + np.array(network.log.times.array), + 2*k + (1 + np.sin(network.log.outputs.array[:, j])), + # label=f"{j}" + label="_nolegend_" + ) + plt.legend() + + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + edges="edges", + name="name", + source="source", + target="target" + ) + + plt.figure() + + node_positions = nx.circular_layout(graph) + node_positions = nx.forceatlas2_layout(graph) + for index, node in enumerate(network_options.nodes): + node.visual.position[:2] = node_positions[node.name] + + _ = nx.draw_networkx_nodes( + graph, + pos=node_positions, + node_color=[data["visual"]["color"] for node, data in graph.nodes.items()], + alpha=0.25, + edgecolors='k', + linewidths=2.0, + ) + nx.draw_networkx_labels( + graph, + pos=node_positions, + labels={node: data["visual"]["label"] for node, data in graph.nodes.items()}, + font_size=11.0, + font_weight='bold', + font_family='sans-serif', + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos=node_positions, + edge_color=[ + [0.0, 1.0, 0.0] if data["type"] == "excitatory" else [1.0, 0.0, 0.0] + for edge, data in graph.edges.items() + ], + width=1., + alpha=np.array(network.data.connectivity.weights)/1e2, + arrowsize=10, + style='dashed', + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle="arc3,rad=-0.2", + ) + plt.figure() + sparse_array = nx.to_scipy_sparse_array(graph) + sns.heatmap( + sparse_array.todense(), cbar=False, square=True, + linewidths=0.5, + annot=True + ) + plt.show() + + +def main(): + """Main.""" + + # Generate the network + network = generate_network() + profile.profile(run_network, network) + + # Run the network + # run_network() + + +if __name__ == "__main__": + main() diff --git a/examples/mouse/components.py b/examples/mouse/components.py new file mode 100644 index 0000000..ac3829e --- /dev/null +++ b/examples/mouse/components.py @@ -0,0 +1,1972 @@ +""" Components """ + +import os +from pprint import pprint +from typing import Iterable, List + +from farms_core import pylog +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +from farms_core.io.yaml import read_yaml +from farms_network.core import options +from farms_network.core.data import NetworkData +from farms_network.core.network import Network +from jinja2 import Environment, FileSystemLoader +from tqdm import tqdm + +plt.rcParams["text.usetex"] = True + + +def calculate_arc_rad(source_pos, target_pos, base_rad=-0.1): + """Calculate arc3 radius for edge based on node positions.""" + dx = target_pos[0] - source_pos[0] + dy = target_pos[1] - source_pos[1] + + # Set curvature to zero if nodes are aligned horizontally or vertically + if dx == 0 or dy == 0: + return 0.0 + + # Decide on curvature based on position differences + if abs(dx) > abs(dy): + # Horizontal direction - positive rad for up, negative for down + return -base_rad if dy >= 0 else base_rad + else: + # Vertical direction - positive rad for right, negative for left + return base_rad if dx >= 0 else base_rad + + +def update_edge_visuals(network_options): + """ Update edge options """ + + nodes = 
network_options.nodes + edges = network_options.edges + for edge in edges: + base_rad = calculate_arc_rad( + nodes[nodes.index(edge.source)].visual.position, + nodes[nodes.index(edge.target)].visual.position, + ) + edge.visual.connectionstyle = f"arc3,rad={base_rad*0.0}" + return network_options + + +def join_str(strings): + return "_".join(filter(None, strings)) + + +def multiply_transform(vec: np.ndarray, transform_mat: np.ndarray) -> np.ndarray: + """ + Multiply a 2D vector with a 2D transformation matrix (3x3). + + Parameters: + vec (np.ndarray): A 2D vector (shape (2,) or (3,)) + transform_mat (np.ndarray): A 3x3 transformation matrix. + + Returns: + np.ndarray: The transformed vector. + """ + + assert transform_mat.shape == (3, 3), "Transformation matrix must be 3x3" + + # Ensure vec is in homogeneous coordinates (i.e., 3 elements). + if vec.shape == (2,): + vec = np.append(vec, 1) + elif vec.shape != (3,): + raise ValueError("Input vector must have shape (2,) or (3,)") + + # Perform the multiplication + return transform_mat @ vec + + +def get_scale_matrix(scale: float) -> np.ndarray: + """Return a scaling matrix.""" + return np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]) + + +def get_mirror_matrix(mirror_x: bool = False, mirror_y: bool = False) -> np.ndarray: + """Return a mirror matrix based on the mirror flags.""" + mirror_matrix = np.identity(3) + if mirror_x: + mirror_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]) + if mirror_y: + mirror_matrix = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) + return mirror_matrix + + +def get_translation_matrix(off_x: float, off_y: float) -> np.ndarray: + """Return a translation matrix.""" + return np.array([[1, 0, off_x], [0, 1, off_y], [0, 0, 1]]) + + +def get_rotation_matrix(angle: float) -> np.ndarray: + """Return a rotation matrix for the given angle in degrees.""" + angle_rad = np.radians(angle) + return np.array( + [ + [np.cos(angle_rad), -np.sin(angle_rad), 0], + [np.sin(angle_rad), np.cos(angle_rad), 0], + [0, 0, 1], + ] + ) + + +def get_transform_mat( + angle: float, + off_x: float, + off_y: float, + mirror_x: bool = False, + mirror_y: bool = False, + scale: float = 2.5, +) -> np.ndarray: + """Return a complete transformation matrix based on input parameters.""" + scale_matrix = get_scale_matrix(scale) + mirror_matrix = get_mirror_matrix(mirror_x, mirror_y) + translation_matrix = get_translation_matrix(off_x, off_y) + rotation_matrix = get_rotation_matrix(angle) + + # Combine the transformations in the correct order: translation -> rotation -> mirror -> scale + transform_matrix = translation_matrix @ rotation_matrix @ mirror_matrix + transform_matrix = scale_matrix @ transform_matrix + + return transform_matrix + + +def create_node( + base_name: str, + node_id: str, + node_type: str, + position_vec: np.ndarray, + label: str, + color: list, + transform_mat: np.ndarray, + states: dict, + parameters: dict, +) -> options.LIDannerNodeOptions: + """ + Function to create a node with visual and state options. + + Parameters: + base_name (str): The base name to prepend to node_id. + node_id (str): Unique identifier for the node. + position_vec (np.ndarray): The position of the node. + label (str): The visual label for the node. + color (list): RGB color values for the node. + node_type (str): Type of the node ('LINaPDanner' or 'LIDanner'). + transform_mat (np.ndarray): Transformation matrix for positioning. + v0 (float): Initial value for the state option 'v0'. 
+ h0 (float, optional): Initial value for the state option 'h0', only used for some node types. + + Returns: + options.LIDannerNodeOptions: The configured node options object. + """ + # Generate the full name and position + full_name = join_str((base_name, node_id)) + position = multiply_transform(position_vec, transform_mat).tolist() + + # Determine node type and state options + visual_options = options.NodeVisualOptions( + position=position, + label=label, + color=color, + ) + if node_type == "LINaPDanner": + state_options = options.LINaPDannerStateOptions(list(states.values())) + parameters = options.LINaPDannerNodeParameterOptions.defaults(**parameters) + noise = options.OrnsteinUhlenbeckOptions.defaults() + node_options_class = options.LINaPDannerNodeOptions + elif node_type == "LIDanner": + state_options = options.LIDannerStateOptions(list(states.values())) + parameters = options.LIDannerNodeParameterOptions.defaults(**parameters) + noise = options.OrnsteinUhlenbeckOptions.defaults() + node_options_class = options.LIDannerNodeOptions + elif node_type == "Linear": + state_options = None + parameters = options.LinearParameterOptions.defaults(**parameters) + noise = None + node_options_class = options.LinearNodeOptions + elif node_type == "ReLU": + state_options = None + parameters = options.ReLUParameterOptions.defaults(**parameters) + noise = None + node_options_class = options.ReLUNodeOptions + elif node_type == "Relay": + state_options = None + parameters = None + noise = None + visual_options.radius = 0.0 + node_options_class = options.RelayNodeOptions + else: + raise ValueError(f"Unknown node type: {node_type}") + + # Create and return the node options + return node_options_class( + name=full_name, + parameters=parameters, + visual=visual_options, + state=state_options, + noise=noise, + ) + + +def create_nodes( + node_specs: Iterable, + base_name: str, + transform_mat: np.ndarray, +) -> dict[str, options.NodeOptions]: + """Create node using create_method""" + nodes = {} + for ( + node_id, + node_type, + position_vec, + label, + color, + states, + parameters, + ) in node_specs: + nodes[node_id] = create_node( + base_name, + node_id, + node_type, + position_vec, + label, + color, + transform_mat, + states, + parameters, + ) + return nodes + + +def create_edges( + edge_specs: Iterable, + base_name: str, + visual_options: options.EdgeVisualOptions = options.EdgeVisualOptions(), +) -> dict[str, options.EdgeOptions]: + """Create edges from specs""" + edges = {} + visual_options = options.EdgeVisualOptions(**visual_options) + for source_tuple, target_tuple, weight, edge_type in edge_specs: + source = join_str((base_name, *source_tuple)) + target = join_str((base_name, *target_tuple)) + edges[join_str((source, "to", target))] = options.EdgeOptions( + source=source, + target=target, + weight=weight, + type=edge_type, + visual=visual_options, + ) + return edges + + +class BrainStemDrive: + """ Generate Brainstem drive network """ + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + node_specs = [ + ( + join_str(("BS", "input")), + "Relay", + np.array((3.0, 0.0)), + "A", + [0.0, 0.0, 0.0], + {}, + {}, + ), + ( + join_str(("BS", "DR")), + "Linear", + np.array((3.0, -1.0)), + "A", + [0.0, 0.0, 0.0], + None, + {"slope": 1.0, "bias": 0.0}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = 
create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + + # Define edge details in a list for easier iteration + edge_specs = [ + (("BS", "input"), ("BS", "DR"), 1.0, "excitatory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class RhythmGenerator: + """Generate RhythmGenerator Network""" + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + node_specs = [ + ( + join_str(("RG", "F")), + "LINaPDanner", + np.array((3.0, 0.0)), + "F", + [1.0, 0.0, 0.0], + {"v": -62.5, "h": np.random.uniform(0, 1)}, + {}, + ), + ( + join_str(("RG", "E")), + "LINaPDanner", + np.array((-3.0, 0.0)), + "E", + [0.0, 1.0, 0.0], + {"v": -62.5, "h": np.random.uniform(0, 1)}, + {}, + ), + ( + join_str(("RG", "In", "F")), + "LIDanner", + np.array((1.0, -1.5)), + "In", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("RG", "In", "E")), + "LIDanner", + np.array((-1.0, 1.5)), + "In", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("RG", "In", "E2")), + "LIDanner", + np.array((-5.0, 1.0)), + "In", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("RG", "F", "DR")), + "Linear", + np.array((3.0, 2.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.1, "bias": 0.0}, + ), + ( + join_str(("RG", "E", "DR")), + "Linear", + np.array((-3.0, 2.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.0, "bias": 0.1}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + + # Define edge details in a list for easier iteration + edge_specs = [ + (("RG", "F"), ("RG", "In", "F"), 0.4, "excitatory"), + (("RG", "In", "F"), ("RG", "E"), -1.0, "inhibitory"), + (("RG", "E"), ("RG", "In", "E"), 0.4, "excitatory"), + (("RG", "In", "E"), ("RG", "F"), -0.08, "inhibitory"), + (("RG", "In", "E2"), ("RG", "F"), -0.04, "inhibitory"), + (("RG", "F", "DR"), ("RG", "F"), 1.0, "excitatory"), + (("RG", "E", "DR"), ("RG", "E"), 1.0, "excitatory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class PatternFormation: + """Generate PatternFormation Network""" + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + nodes = {} + + node_specs = [ + ( + join_str(("PF", "FA")), + "LINaPDanner", + np.array((-3.0, 0.0)), + "F\\textsubscript{A}", + [1.0, 0.0, 0.0], + {"v": -60.0, "h": np.random.uniform(0, 1)}, + {"g_nap": 0.125, "e_leak": -67.5}, + ), + ( + join_str(("PF", "EA")), + "LINaPDanner", + np.array((-9.0, 0.0)), + "E\\textsubscript{A}", + [0.0, 1.0, 0.0], + {"v": -60.0, "h": np.random.uniform(0, 1)}, + {"g_nap": 0.125, "e_leak": -67.5}, + ), + ( + join_str(("PF", "In", "FA")), + "LIDanner", + np.array((-5.0, -1.5)), + "In\\textsubscript{A}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("PF", "In", "EA")), + "LIDanner", + np.array((-7.0, 1.5)), + "In\\textsubscript{A}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + 
join_str(("PF", "FB")), + "LINaPDanner", + np.array((9.0, 0.0)), + "F\\textsubscript{B}", + [1.0, 0.0, 0.0], + {"v": -60.0, "h": np.random.uniform(0, 1)}, + {"g_nap": 0.125, "g_leak": 1.0, "e_leak": -67.5}, + ), + ( + join_str(("PF", "EB")), + "LINaPDanner", + np.array((3.0, 0.0)), + "E\\textsubscript{B}", + [0.0, 1.0, 0.0], + {"v": -60.0, "h": np.random.uniform(0, 1)}, + {"g_nap": 0.125, "g_leak": 1.0, "e_leak": -67.5}, + ), + ( + join_str(("PF", "In", "FB")), + "LIDanner", + np.array((7.0, -1.5)), + "In\\textsubscript{B}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("PF", "In", "EB")), + "LIDanner", + np.array((5.0, 1.5)), + "In\\textsubscript{B}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("PF", "In2", "F")), + "LIDanner", + np.array((9.0, -3.0)), + "In\\textsubscript{2F}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {"g_leak": 5.0}, + ), + ( + join_str(("PF", "In2", "E")), + "LIDanner", + np.array((3.0, -3.0)), + "In\\textsubscript{2E}", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {"g_leak": 5.0}, + ), + ( + join_str(("PF", "FA", "DR")), + "Linear", + np.array((-3.0, 2.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.6, "bias": 0.0}, + ), + ( + join_str(("PF", "EA", "DR")), + "Linear", + np.array((-9.0, 2.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.6, "bias": 0.0}, + ), + ( + join_str(("PF", "EB", "DR")), + "Linear", + np.array((3.0, 2.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.07, "bias": 0.0}, + ), + ( + join_str(("PF", "In2", "E", "DR")), + "Linear", + np.array((4.0, -3.0)), + "d", + [0.5, 0.5, 0.5], # Default visual color if needed + None, + {"slope": 0.1, "bias": 0.0}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + edges = {} + + # Define edge details in a list for easier iteration + edge_specs = [ + (("PF", "FA"), ("PF", "In", "FA"), 0.8, "excitatory"), + (("PF", "EA"), ("PF", "In", "EA"), 1.0, "excitatory"), + (("PF", "In", "FA"), ("PF", "EA"), -1.5, "inhibitory"), + (("PF", "In", "EA"), ("PF", "FA"), -1.0, "inhibitory"), + + (("PF", "FB"), ("PF", "In", "FB"), 1.5, "excitatory"), + (("PF", "EB"), ("PF", "In", "EB"), 1.5, "excitatory"), + (("PF", "In", "FB"), ("PF", "EB"), -2.0, "inhibitory"), + (("PF", "In", "EB"), ("PF", "FB"), -0.25, "inhibitory"), + + (("PF", "In", "FA"), ("PF", "EB"), -0.5, "inhibitory"), + (("PF", "In", "FA"), ("PF", "FB"), -0.1, "inhibitory"), + (("PF", "In", "EA"), ("PF", "EB"), -0.5, "inhibitory"), + (("PF", "In", "EA"), ("PF", "FB"), -0.25, "inhibitory"), + + (("PF", "In", "FB"), ("PF", "EA"), -0.5, "inhibitory"), + (("PF", "In", "FB"), ("PF", "FA"), -0.75, "inhibitory"), + (("PF", "In", "EB"), ("PF", "EA"), -2.0, "inhibitory"), + (("PF", "In", "EB"), ("PF", "FA"), -2.0, "inhibitory"), + + (("PF", "In2", "F"), ("PF", "FB"), -3.0, "inhibitory"), + (("PF", "In2", "E"), ("PF", "EB"), -3.0, "inhibitory"), + + (("PF", "FA", "DR"), ("PF", "FA"), 1.0, "excitatory"), + (("PF", "EA", "DR"), ("PF", "EA"), 1.0, "excitatory"), + (("PF", "EB", "DR"), ("PF", "EB"), 1.0, "excitatory"), + (("PF", "In2", "E", "DR"), ("PF", "In2", "E"), -1.0, "inhibitory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class Commissural: + """Generate Commissural 
Network""" + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + node_specs = [ + ( + "V2a", + "LIDanner", + np.array((0.0, 2.0, 1.0)), + "V2\\textsubscript{a}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + "InV0V", + "LIDanner", + np.array((0.0, 0.0, 1.0)), + "In\\textsubscript{i}", + [1.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + "V0V", + "LIDanner", + np.array((2.0, 0.5, 1.0)), + "V0\\textsubscript{V}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + "V0D", + "LIDanner", + np.array((2.0, -2.0, 1.0)), + "V0\\textsubscript{D}", + [1.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + "V3E", + "LIDanner", + np.array((2.0, 3.0, 1.0)), + "V3\\textsubscript{E}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + "V3F", + "LIDanner", + np.array((2.0, -4.0, 1.0)), + "V3\\textsubscript{F}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("V0V", "DR")), + "Linear", + np.array((3.0, 1.0)), + "d", + [0.5, 0.5, 0.5], + None, + {"slope": 0.15, "bias": 0.0}, + ), + ( + join_str(("V0D", "DR")), + "Linear", + np.array((3.0, -2.5)), + "d", + [0.5, 0.5, 0.5], + None, + {"slope": 0.75, "bias": 0.0}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + edges = {} + + # Define edge details in a list for easier iteration + edge_specs = [ + (("V0V", "DR"), ("V0V",), -1.0, "inhibitory"), + (("V0D", "DR"), ("V0D",), -1.0, "inhibitory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class LPSN: + """Generate Long Propriospinal Network""" + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + + # Define node specs in a list for easier iteration + node_specs = [ + ( + join_str(("V0D", "diag")), + "LIDanner", + np.array((0.0, 0.0, 1.0)), + "V0\\textsubscript{D}", + [0.5, 0.0, 0.5], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("fore", "V0V", "diag")), + "LIDanner", + np.array((0.0, -1.25, 1.0)), + "V0\\textsubscript{V}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("hind", "V3", "diag")), + "LIDanner", + np.array((0.0, -4.0, 1.0)), + "V3\\textsubscript{a}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("fore", "Ini", "hom")), + "LIDanner", + np.array((-4.0, 0.0, 1.0)), + "LPN\\textsubscript{i}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("fore", "Sh2", "hom")), + "LIDanner", + np.array((-8.0, 0.0, 1.0)), + "Sh\\textsubscript{2}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("hind", "Sh2", "hom")), + "LIDanner", + np.array((-8.0, -4.0, 1.0)), + "Sh\\textsubscript{2}", + [0.0, 1.0, 0.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("V0D", "diag", "DR")), + "Linear", + np.array((1.0, 0.5)), + "d", + [0.5, 0.5, 0.5], + None, + {"slope": 0.75, "bias": 0.0}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + edges = {} + 
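+        # Note: only the local drive-to-V0D(diag) inhibition is declared in this
+        # method; the long propriospinal projections onto the rhythm generators
+        # appear to be added later through connect_fore_hind_circuits().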
+ # Define edge details in a list for easier iteration + edge_specs = [ + (("V0D", "diag", "DR"), ("V0D", "diag"), -1.0, "inhibitory") + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class MotorLayer: + """Motorneurons and other associated interneurons.""" + + def __init__(self, muscles: List[str], name="", transform_mat=np.identity(3)): + """Initialization.""" + self.name = name + self.muscles = muscles + self.transform_mat = transform_mat + + def nodes(self): + """Add neurons for the motor layer.""" + + spacing = 2.5 + max_muscles = max(len(self.muscles["agonist"]), len(self.muscles["antagonist"])) + + node_specs = [] + # Define neurons for the muscles + for x_off, muscle in zip( + self._generate_positions(len(self.muscles["agonist"]), spacing), + self.muscles["agonist"], + ): + node_specs.extend(self._get_muscle_neurons(muscle, x_off, 0.0)) + + for x_off, muscle in zip( + self._generate_positions(len(self.muscles["antagonist"]), spacing), + self.muscles["antagonist"], + ): + node_specs.extend( + self._get_muscle_neurons(muscle, x_off, 5.0, mirror_y=True) + ) + + # Calculate x positions for Ia inhibitory neurons + IaIn_x_positions = np.linspace(-spacing * max_muscles, spacing * max_muscles, 4) + y_off = 1.75 + + node_specs += [ + ( + join_str(("Ia", "In", "EA")), + "LIDanner", + np.array((IaIn_x_positions[0], y_off, 1.0)), + "Ia\\textsubscript{ea}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("Ia", "In", "EB")), + "LIDanner", + np.array((IaIn_x_positions[1], y_off, 1.0)), + "Ia\\textsubscript{eb}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("Ia", "In", "FA")), + "LIDanner", + np.array((IaIn_x_positions[2], y_off, 1.0)), + "Ia\\textsubscript{fa}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("Ia", "In", "FB")), + "LIDanner", + np.array((IaIn_x_positions[3], y_off, 1.0)), + "Ia\\textsubscript{fb}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("Ib", "In", "RG")), + "LIDanner", + np.array((np.mean(IaIn_x_positions), y_off - spacing, 1.0)), + "Ib\\textsubscript{rg}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add motor feedback connections.""" + edges = {} + + # Define edges for each muscle + for muscle in self.muscles["agonist"]: + edges.update(self._generate_motor_connections(muscle)) + for muscle in self.muscles["antagonist"]: + edges.update(self._generate_motor_connections(muscle)) + return edges + + def _generate_motor_connections(self, muscle): + """Generate the motor connections for a specific muscle.""" + edge_specs = [ + *MotorLayer.connect_Rn_reciprocal_inhibition(muscle), + *MotorLayer.connect_Ia_monosypatic_excitation(muscle), + *MotorLayer.connect_Ib_disynaptic_inhibition(muscle), + ] + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + def _get_muscle_neurons(self, muscle, x_off, y_off, mirror_y=False): + """Return neuron specifications for a muscle.""" + mirror_y_sign = -1 if mirror_y else 1 + return [ + ( + join_str((muscle["name"], "Mn")), + "LIDanner", + np.array((x_off, y_off, 1.0)), + "Mn", + [1.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {"e_leak": -52.5, "g_leak": 1.0}, + ), + ( + join_str((muscle["name"], "Ia")), + 
"Relay", + np.array((x_off - 0.5, y_off + 0.75 * mirror_y_sign, 1.0)), + "Ia", + [1.0, 0.0, 0.0], + {}, + {}, + ), + ( + join_str((muscle["name"], "II")), + "Relay", + np.array((x_off, y_off + 0.75 * mirror_y_sign, 1.0)), + "II", + [1.0, 0.0, 0.0], + {}, + {}, + ), + ( + join_str((muscle["name"], "Ib")), + "Relay", + np.array((x_off + 0.5, y_off + 0.75 * mirror_y_sign, 1.0)), + "Ib", + [1.0, 0.0, 0.0], + {}, + {}, + ), + ( + join_str((muscle["name"], "Rn")), + "LIDanner", + np.array((x_off + 0.5, y_off - 1.0 * mirror_y_sign, 1.0)), + "Rn", + [1.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str((muscle["name"], "Ib", "In", "i")), + "LIDanner", + np.array((x_off + 1.0, y_off, 1.0)), + "Ib\\textsubscript{i}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str((muscle["name"], "Ib", "In", "e")), + "LIDanner", + np.array((x_off + 1.0, y_off + 1.5 * mirror_y_sign, 1.0)), + "Ib\\textsubscript{e}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str((muscle["name"], "II", "In", "RG")), + "LIDanner", + np.array((x_off - 1.0, y_off, 1.0)), + "II\\textsubscript{RG}", + [0.0, 0.0, 1.0], + {"v": -60.0, "a": 0.0}, + {}, + ), + ] + + def _generate_positions(self, num_muscles, spacing): + """Generate positions for the neurons.""" + return np.linspace(-spacing * num_muscles, spacing * num_muscles, num_muscles) + + @staticmethod + def connect_Ia_monosypatic_excitation(muscle: str): + edge_specs = [ + ((muscle["name"], "Ia"), (muscle["name"], "Mn"), 0.01, "excitatory"), + ] + return edge_specs + + @staticmethod + def connect_Ib_disynaptic_inhibition(muscle: str): + edge_specs = [ + ((muscle["name"], "Ib"), (muscle["name"], "Ib", "In", "i"), 0.01, "excitatory"), + ((muscle["name"], "Ib", "In", "i"), (muscle["name"], "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + @staticmethod + def connect_Rn_reciprocal_inhibition(muscle: str): + """ Renshaw reciprocal inhibition """ + edge_specs = [ + ((muscle["name"], "Mn"), (muscle["name"], "Rn"), 0.01, "excitatory"), + ((muscle["name"], "Rn"), (muscle["name"], "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + +##################### +# Vestibular System # +##################### +class VestibularSystem: + """Generate Vestibular System Network""" + + def __init__(self, side="", rate="", axis="", transform_mat=np.identity(3)): + """Initialization.""" + self.side = side + self.rate = rate + self.axis = axis + self.transform_mat = transform_mat + + def nodes(self): + """Define nodes.""" + directions = ("cclock", "clock") + node_specs = [ + ( + join_str((self.side, self.rate, self.axis, "Vn")), + "Relay", + np.array((0.0, 0.0)), + "Vn", + [1.0, 1.0, 0.0], # Yellow for sensory neuron + None, + {}, + ) + ] + + for direction, x_offset in zip(directions, (-1.0, 1.0)): + # ReLU Interneurons + node_specs.append( + ( + join_str((self.side, self.rate, self.axis, direction, "ReLU", "Vn")), + "ReLU", + np.array((0.0 - x_offset, 1.0)), + "ReLU", + [0.2, 0.2, 1.0], # Blue for interneurons + None, + {"sign": 1.0 if direction == "cclock" else -1.0, "offset": np.deg2rad(10), "gain": 0.5}, + ) + ) + # Inhibitory Interneurons + node_specs.append( + ( + join_str((self.side, self.rate, self.axis, direction, "In", "Vn")), + "LIDanner", + np.array((0.0 - x_offset, 2.0)), + "In", + [0.5, 0.5, 0.5], # Gray for inhibitory interneurons + {"v": -60.0, "a": 0.0}, + {}, + ) + ) + + # Create nodes using the `create_nodes` utility + return create_nodes(node_specs, base_name="", transform_mat=self.transform_mat) + + def edges(self): + 
"""Define edges.""" + directions = ("cclock", "clock") + edge_specs = [] + + for direction in directions: + edge_specs.append( + ( + (self.side, self.rate, self.axis, "Vn"), + (self.side, self.rate, self.axis, direction, "ReLU", "Vn"), + 1.0, + "excitatory", + ) + ) + edge_specs.append( + ( + (self.side, self.rate, self.axis, direction, "ReLU", "Vn"), + (self.side, self.rate, self.axis, direction, "In", "Vn"), + 1.0, + "excitatory", + ) + ) + + # Create edges using the `create_edges` utility + return create_edges(edge_specs, base_name="") + + +######################## +# SomatoSensory System # +######################## +class SomatoSensory: + """Generate Afferents Network""" + + def __init__(self, contacts, name="", transform_mat=np.identity(3)): + """Initialization.""" + self.contacts = contacts + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Define nodes.""" + n_contacts = len(self.contacts) + node_y_pos = np.linspace(-1 * n_contacts, 1 * n_contacts, n_contacts) + node_specs = [] + + for y_pos, contact in zip(node_y_pos, self.contacts): + node_specs.append( + ( + join_str((contact, "cut")), + "Relay", + np.array((0.0, y_pos)), + "C", + [1.0, 0.0, 0.0], + {}, + {} + ) + ) + + node_specs.append( + ( + join_str(("In", "cut")), + "LIDanner", + np.array((-1.0, np.mean(node_y_pos))), + "In\\textsubscript{C}", + [1.0, 0.0, 0.0], + {"v": -60.0, "a": 0.0}, + {} + ) + ) + + return create_nodes(node_specs, base_name=self.name, transform_mat=self.transform_mat) + + def edges(self): + """Define edges.""" + edge_specs = [] + + for contact in self.contacts: + edge_specs.append( + ((contact, "cut"), ("In", "cut"), 1.0, "excitatory") + ) + + return create_edges(edge_specs, base_name=self.name) + + +###################### +# CONNECT RG PATTERN # +###################### +def connect_rhythm_pattern(base_name): + """Connect RG's to pattern formation.""" + + edge_specs = [ + (("RG", "F"), ("PF", "FA"), 0.8, "excitatory"), + (("RG", "E"), ("PF", "EA"), 0.7, "excitatory"), + + (("RG", "F"), ("PF", "FB"), 0.6, "excitatory"), + (("RG", "E"), ("PF", "EB"), 0.5, "excitatory"), + + (("RG", "F"), ("PF", "In2", "F"), 0.4, "excitatory"), + (("RG", "E"), ("PF", "In2", "E"), 0.35, "excitatory"), + + (("RG", "In", "F"), ("PF", "EA"), -1.5, "inhibitory"), + (("RG", "In", "E"), ("PF", "FA"), -1.5, "inhibitory"), + ] + + # Use create_edges function to generate the edge options + return create_edges( + edge_specs, + base_name=base_name, + visual_options=options.EdgeVisualOptions() + ) + + +########################## +# Connect RG COMMISSURAL # +########################## +def connect_rg_commissural(): + """Connect RG's to Commissural.""" + + edge_specs = [] + + for limb in ("hind", "fore"): + for side in ("left", "right"): + edge_specs.extend([ + ((side, limb, "RG", "F"), (side, limb, "V2a"), 1.0, "excitatory"), + ((side, limb, "RG", "F"), (side, limb, "V0D"), 0.7, "excitatory"), + ((side, limb, "RG", "F"), (side, limb, "V3F"), 0.35, "excitatory"), + ((side, limb, "RG", "E"), (side, limb, "V3E"), 0.35, "excitatory"), + ((side, limb, "V2a"), (side, limb, "V0V"), 1.0, "excitatory"), + ((side, limb, "InV0V"), (side, limb, "RG", "F"), -0.07, "inhibitory") + ]) + + # Handle cross-limb connections + for sides in (("left", "right"), ("right", "left")): + edge_specs.extend([ + ((sides[0], limb, "V0V"), (sides[1], limb, "InV0V"), 0.6, "excitatory"), + ((sides[0], limb, "V0D"), (sides[1], limb, "RG", "F"), -0.07, "inhibitory"), + ((sides[0], limb, "V3F"), (sides[1], limb, "RG", "F"), 0.03, 
"excitatory"), + ((sides[0], limb, "V3E"), (sides[1], limb, "RG", "E"), 0.02, "excitatory"), + ((sides[0], limb, "V3E"), (sides[1], limb, "RG", "In", "E"), 0.0, "excitatory"), + ((sides[0], limb, "V3E"), (sides[1], limb, "RG", "In", "E2"), 0.8, "excitatory"), + ]) + + # Create the edges using create_edges + edges = create_edges( + edge_specs, base_name="", visual_options=options.EdgeVisualOptions() + ) + return edges + + +############################## +# Connect Patter Commissural # +############################## +def connect_pattern_commissural(): + + edge_specs = [] + + for limb in ("hind",): + for side in ("left", "right"): + edge_specs.extend([ + ((side, limb, "V0D"), (side, limb, "PF", "FA"), -4.0, "inhibitory"), + ((side, limb, "InV0V"), (side, limb, "PF", "FA"), -3.0, "inhibitory"), + ]) + + # Create the edges using create_edges + edges = create_edges( + edge_specs, base_name="", visual_options=options.EdgeVisualOptions() + ) + return edges + + +def connect_fore_hind_circuits(): + """Connect CPG's to Interneurons.""" + + edge_specs = [] + + for side in ("left", "right"): + edge_specs.extend([ + ((side, "fore", "RG", "F"), (side, "fore", "Ini", "hom"), 0.70, "excitatory"), + ((side, "fore", "RG", "F"), (side, "V0D", "diag"), 0.50, "excitatory"), + ((side, "fore", "Ini", "hom"), (side, "hind", "RG", "F"), -0.01, "inhibitory"), + ((side, "fore", "Sh2", "hom"), (side, "hind", "RG", "F"), 0.01, "excitatory"), + ((side, "hind", "Sh2", "hom"), (side, "fore", "RG", "F"), 0.05, "excitatory"), + ((side, "fore", "RG", "F"), (side, "fore", "V0V", "diag"), 0.325, "excitatory"), + ((side, "hind", "RG", "F"), (side, "hind", "V3", "diag"), 0.325, "excitatory") + ]) + for limb in ("hind", "fore"): + edge_specs.extend([ + ((side, limb, "RG", "E"), (side, limb, "Sh2", "hom"), 0.50, "excitatory") + ]) + + # Handle cross-limb connections + for sides in (("left", "right"), ("right", "left")): + edge_specs.extend([ + ((sides[0], "V0D", "diag"), (sides[1], "hind", "RG", "F"), -0.01, "inhibitory"), + ((sides[0], "fore", "V0V", "diag"), (sides[1], "hind", "RG", "F"), 0.005, "excitatory"), + ((sides[0], "hind", "V3", "diag"), (sides[1], "fore", "RG", "F"), 0.04, "excitatory") + ]) + + # Create the edges using create_edges + edges = create_edges( + edge_specs, base_name="", visual_options=options.EdgeVisualOptions() + ) + + return edges + + +def connect_pattern_motor_layer(base_name, muscle, patterns): + """Return edge specs for connecting pattern formation to motor neuron layer.""" + edge_specs = [ + (("PF", pattern), (muscle, "Mn"), 0.1, "excitatory",) + for pattern in patterns + ] + return create_edges(edge_specs, base_name=base_name) + + +def connect_pattern_to_IaIn(side, limb): + """Return edge specs for connecting pattern formation to motor neuron layer.""" + edge_specs = [ + (("PF", pattern), ("Ia", "In", pattern), 0.01, "excitatory") + for pattern in ("FA", "EA", "FB", "EB") + ] + return edge_specs + + +def connect_II_pattern_feedback(side, limb, muscle, patterns): + """Return edge specs for connecting group II feedback to pattern layer.""" + edge_specs = [ + ((muscle, "II"), ("PF", pattern), 0.01, "excitatory") + for pattern in patterns + ] + return edge_specs + + +def connect_Ia_reciprocal_inhibition_extensor2flexor(extensor: str, flexor: str): + """Return edge specs for Ia reciprocal inhibition from extensor to flexor.""" + edge_specs = [ + ((extensor, "Ia"), ("Ia", "In", "EA"), 0.01, "excitatory"), + ((extensor, "Ia"), ("Ia", "In", "EB"), 0.01, "excitatory"), + (("Ia", "In", "EA"), (flexor, 
"Mn"), -0.01, "inhibitory"), + (("Ia", "In", "EB"), (flexor, "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + +def connect_Ia_reciprocal_inhibition_flexor2extensor(flexor: str, extensor: str): + """Return edge specs for Ia reciprocal inhibition from flexor to extensor.""" + edge_specs = [ + ((flexor, "Ia"), ("Ia", "In", "FA"), 0.01, "excitatory"), + ((flexor, "Ia"), ("Ia", "In", "FB"), 0.01, "excitatory"), + (("Ia", "In", "FA"), (extensor, "Mn"), -0.01, "inhibitory"), + (("Ia", "In", "FB"), (extensor, "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + +def connect_Rn_reciprocal_facilitation_extensor2flexor(extensor: str, flexor: str): + """Return edge specs for Rn reciprocal facilitation from extensor to flexor.""" + edge_specs = [ + ((extensor, "Rn"), ("Ia", "In", "EA"), -0.01, "inhibitory"), + (("Ia", "In", "EA"), (flexor, "Mn"), -0.01, "inhibitory"), + ((extensor, "Rn"), ("Ia", "In", "EB"), -0.01, "inhibitory"), + (("Ia", "In", "EB"), (flexor, "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + +def connect_Rn_reciprocal_facilitation_flexor2extensor(flexor: str, extensor: str): + """Return edge specs for Rn reciprocal facilitation from flexor to extensor.""" + edge_specs = [ + ((flexor, "Rn"), ("Ia", "In", "FA"), -0.01, "inhibitory"), + (("Ia", "In", "FA"), (extensor, "Mn"), -0.01, "inhibitory"), + ((flexor, "Rn"), ("Ia", "In", "FB"), -0.01, "inhibitory"), + (("Ia", "In", "FB"), (extensor, "Mn"), -0.01, "inhibitory"), + ] + return edge_specs + + +def connect_Ib_disynaptic_extensor_excitation(extensor): + """Return edge specs for Ib disynaptic excitation in extensor.""" + edge_specs = [ + ((extensor, "Ib"), (extensor, "Ib", "In", "e"), 1.0, "excitatory"), + ((extensor, "Ib", "In", "e"), (extensor, "Mn"), 1.0, "excitatory"), + ] + return edge_specs + + +def connect_Ib_rg_feedback(extensor): + """Return edge specs for Ib rhythm feedback.""" + edge_specs = [ + ((extensor, "Ib"), ("Ib", "In", "RG"), 1.0, "excitatory"), + (("Ib", "In", "RG"), ("RG", "In", "E"), 1.0, "excitatory"), + (("Ib", "In", "RG"), ("RG", "E"), 1.0, "excitatory"), + ] + return edge_specs + + +def connect_II_rg_feedback(flexor): + """Return edge specs for II rhythm feedback.""" + edge_specs = [ + ((flexor, "II"), (flexor, "II", "In", "RG"), 1.0, "excitatory"), + ((flexor, "II", "In", "RG"), ("RG", "F"), 1.0, "excitatory"), + ((flexor, "II", "In", "RG"), ("RG", "In", "F"), 1.0, "excitatory"), + ] + return edge_specs + + +def connect_roll_vestibular_to_mn(side, fore_muscles, hind_muscles, default_weight=0.01): + """Return edge specs for roll vestibular to motor neurons.""" + rates = ("position", "velocity") + weights = {"position": default_weight, "velocity": default_weight} + edge_specs = [] + + for rate in rates: + for muscle in fore_muscles: + edge_specs.append( + ( + (rate, "roll", "cclock", "In", "Vn"), + ("fore", muscle["name"], "Mn"), + weights[rate], + "excitatory", + ) + ) + + for muscle in hind_muscles: + edge_specs.append( + ( + (rate, "roll", "cclock", "In", "Vn"), + ("hind", muscle["name"], "Mn"), + weights[rate], + "excitatory", + ) + ) + edges = create_edges(edge_specs, base_name=side) + return edges + + +def connect_pitch_vestibular_to_mn( + side, fore_muscles, hind_muscles, default_weight=0.01 +): + """Return edge specs for pitch vestibular to motor neurons.""" + rates = ("position", "velocity") + + edge_specs = [] + for rate in rates: + for muscle in fore_muscles: + edge_specs.append( + ( + (rate, "pitch", "cclock", "In", "Vn"), + ("fore", muscle["name"], "Mn"), + default_weight, + 
"excitatory", + ) + ) + + for muscle in hind_muscles: + edge_specs.append( + ( + (rate, "pitch", "clock", "In", "Vn"), + ("hind", muscle["name"], "Mn"), + default_weight, + "excitatory", + ) + ) + edges = create_edges(edge_specs, base_name=side) + return edges + + +def connect_somatosensory_to_rhythm( + side, limb, contacts, default_weight=0.01 +): + """ Connect somatosensory to Rhythm generation """ + edge_specs = [ + (("In", "cut"), ("RG", "In", "E"), default_weight, "excitatory"), + (("In", "cut"), ("RG", "E"), default_weight, "excitatory"), + ] + edges = create_edges(edge_specs, base_name=join_str((side, limb))) + return edges + + +########### +# Muscles # +########### +def define_muscle_patterns() -> dict: + muscles_patterns = { + "hind": { + "bfa": ["EA", "EB"], + "ip": ["FA", "FB"], + "bfpst": ["FA", "EA", "FB", "EB"], + "rf": ["EA", "FB", "EB"], + "va": ["EA", "FB", "EB"], + "mg": ["FA", "EA", "EB"], + "sol": ["EA", "EB"], + "ta": ["FA", "FB"], + "ab": ["FA", "EA", "FB", "EB"], + "gm_dorsal": ["FA", "EA", "FB", "EB"], + "edl": ["FA", "EA", "FB", "EB"], + "fdl": ["FA", "EA", "FB", "EB"], + }, + "fore": { + "spd": ["FA", "EA", "FB", "EB"], + "ssp": ["FA", "EA", "FB", "EB"], + "abd": ["FA", "EA", "FB", "EB"], + "add": ["FA", "EA", "FB", "EB"], + "tbl": ["EA", "EB"], + "tbo": ["EA", "EB"], + "bbs": ["FA", "FB"], + "bra": ["FA", "FB"], + "ecu": ["FA", "FB"], + "fcu": ["EA", "EB"], + } + } + return muscles_patterns + + +def generate_muscle_agonist_antagonist_pairs(muscle_config_path: str) -> dict: + # read muscle config file + muscles_config = read_yaml(muscle_config_path) + + sides = ("left", "right") + limbs = ("hind", "fore") + + muscles = { + sides[0]: { + limbs[0]: {"agonist": [], "antagonist": []}, + limbs[1]: {"agonist": [], "antagonist": []}, + }, + sides[1]: { + limbs[0]: {"agonist": [], "antagonist": []}, + limbs[1]: {"agonist": [], "antagonist": []}, + }, + } + + for _name, muscle in muscles_config["muscles"].items(): + _side = muscle["side"] + _limb = muscle["limb"] + function = muscle.get("function", "agonist") + muscles[_side][_limb][function].append( + { + "name": join_str(_name.split("_")[2:]), + "type": muscle["type"], + "abbrev": muscle["abbrev"], + } + ) + return muscles + + +################ +# Limb Circuit # +################ +def limb_circuit( + network_options: options.NetworkOptions, + side: str, + limb: str, + muscles: dict, + contacts: Iterable[str] = None, + transform_mat=np.identity(3) +): + + # TODO: Change the use of side and limb attr name in loops + # Base name + name = join_str((side, limb)) + + ##################### + # Rhythm generation # + ##################### + rhythm = RhythmGenerator(name=name, transform_mat=transform_mat) + network_options.add_nodes(rhythm.nodes().values()) + network_options.add_edges(rhythm.edges().values()) + + ##################### + # Pattern Formation # + ##################### + pattern_transformation_mat = transform_mat@( + get_translation_matrix(off_x=0.0, off_y=7.5) + ) + + pattern = PatternFormation( + name=name, + transform_mat=pattern_transformation_mat + ) + network_options.add_nodes(pattern.nodes().values()) + network_options.add_edges(pattern.edges().values()) + + # Connect sub layers + rhythm_pattern_edges = connect_rhythm_pattern(name) + network_options.add_edges(rhythm_pattern_edges.values()) + + + ############## + # MotorLayer # + ############## + ################################### + # Connect patterns and motorlayer # + ################################### + muscles_patterns = define_muscle_patterns() + + 
motor_transformation_mat = pattern_transformation_mat@( + get_translation_matrix( + off_x=0.0, + off_y=5.0 + ) + ) + + motor = MotorLayer( + muscles=muscles[side][limb], + name=name, + transform_mat=motor_transformation_mat + ) + + network_options.add_nodes(motor.nodes().values()) + # network_options.add_edges(motor.edges().values()) + + # Connect pattern formation to motor neurons + for muscle, patterns in muscles_patterns[limb].items(): + pattern_motor_edges = connect_pattern_motor_layer(name, muscle, patterns) + network_options.add_edges(pattern_motor_edges.values()) + + # Connect pattern formation to IaIn + edge_specs = connect_pattern_to_IaIn(side, limb) + network_options.add_edges((create_edges(edge_specs, name)).values()) + + ##################### + # Connect afferents # + ##################### + edge_specs = [] + Ib_feedback_to_rg = { + "hind": ["mg", "sol", "fdl"], + "fore": ["fcu"] + } + + Ib_feedback_disynaptic_excitation ={ + "hind": ["bfa", "bfpst", "va", "rf", "mg", "sol", "fdl"], + "fore": ["ssp", "tbl", "tbo", "fcu"] + } + + II_feedback_to_pattern = { + "hind": { + "ip": muscles_patterns["hind"]["ip"], + "ta": muscles_patterns["hind"]["ta"], + "edl": muscles_patterns["hind"]["edl"], + }, + "fore": { + "spd": muscles_patterns["fore"]["spd"], + "ecu": muscles_patterns["fore"]["ecu"], + } + } + + II_feedback_to_rg = { + "hind": ["ip", "ta",], + "fore": ["ssp", "bra", "ecu"] + } + + Ia_reciprocal_inhibition_extensor2flexor = { + "hind": { + "extensors": ["bfa", "bfpst", "rf", "va", "mg", "sol", "fdl"], + "flexors": ["ip", "ta", "edl"], + }, + "fore": { + "extensors": ["ssp", "tbl", "tbo", "fcu"], + "flexors": ["spd", "bra", "bbs", "ecu"], + } + } + + Ia_reciprocal_inhibition_flexor2extensor = { + "hind": { + "extensors": ["bfa", "bfpst", "rf", "va", "mg", "sol", "fdl"], + "flexors": ["ip", "ta", "edl"], + }, + "fore": { + "extensors": ["ssp", "tbl", "tbo", "fcu"], + "flexors": ["spd", "bra", "bbs", "ecu"], + } + } + + renshaw_reciprocal_facilitation_extensor2flexor = { + "hind": { + "extensors": ["bfa", "bfpst", "rf", "va", "mg", "sol", "fdl"], + "flexors": ["ip", "ta", "edl"], + }, + "fore": { + "extensors": ["ssp", "tbl", "tbo", "fcu"], + "flexors": ["spd", "bra", "bbs", "ecu"], + } + } + + renshaw_reciprocal_facilitation_flexor2extensor = { + "hind": { + "extensors": ["bfa", "bfpst", "rf", "va", "mg", "sol", "fdl"], + "flexors": ["ip", "ta", "edl"], + }, + "fore": { + "extensors": ["ssp", "tbl", "tbo", "fcu"], + "flexors": ["spd", "bra", "bbs", "ecu"], + } + } + # Type II connections + # II to Pattern + for muscle, patterns in II_feedback_to_pattern[limb].items(): + edge_specs += connect_II_pattern_feedback( + side, limb=limb, muscle=muscle, patterns=patterns + ) + + for flexor in II_feedback_to_rg[limb]: + edge_specs += connect_II_rg_feedback(flexor) + # Type Ib connections + # Ib to RG + for extensor in Ib_feedback_to_rg[limb]: + edge_specs += connect_Ib_rg_feedback(extensor) + # Ib Disynaptic extensor excitation + for extensor in Ib_feedback_disynaptic_excitation[limb]: + edge_specs += connect_Ib_disynaptic_extensor_excitation(extensor) + # Type Ia connections + # Ia reciprocal inhibition extensor to flexor + for extensor in Ia_reciprocal_inhibition_extensor2flexor[limb]["extensors"]: + for flexor in Ia_reciprocal_inhibition_extensor2flexor[limb]["flexors"]: + edge_specs += connect_Ia_reciprocal_inhibition_extensor2flexor( + extensor, flexor + ) + # Ia reciprocal inhibition flexor to extensor + for flexor in Ia_reciprocal_inhibition_flexor2extensor[limb]["flexors"]: + for 
extensor in Ia_reciprocal_inhibition_flexor2extensor[limb]["extensors"]: + edge_specs += connect_Ia_reciprocal_inhibition_flexor2extensor( + flexor, extensor + ) + # Renshaw recurrent connections + # renshaw reciprocal facilitation extensor to flexor + for extensor in renshaw_reciprocal_facilitation_extensor2flexor[limb]["extensors"]: + for flexor in renshaw_reciprocal_facilitation_extensor2flexor[limb]["flexors"]: + edge_specs += connect_Rn_reciprocal_facilitation_extensor2flexor(extensor, flexor) + # renshaw reciprocal facilitation flexor to extensor + for flexor in renshaw_reciprocal_facilitation_flexor2extensor[limb]["flexors"]: + for extensor in renshaw_reciprocal_facilitation_flexor2extensor[limb]["extensors"]: + edge_specs += connect_Rn_reciprocal_facilitation_flexor2extensor(flexor, extensor) + + edges = create_edges(edge_specs, name) + network_options.add_edges(edges.values()) + + # somatosensory system + if contacts: + somatosensory_transformation_mat = ( + motor_transformation_mat@get_translation_matrix(off_x=1.0, off_y=10.0) + ) + somatosensory = SomatoSensory( + contacts=contacts, + name=join_str((side, limb)), + transform_mat=somatosensory_transformation_mat + ) + network_options.add_nodes(somatosensory.nodes().values()) + network_options.add_edges(somatosensory.edges().values()) + # Connect somatosensory to Rhythm + edges = connect_somatosensory_to_rhythm( + side, limb, contacts, default_weight=1.0 + ) + network_options.add_edges(edges.values()) + + return network_options + + +##################### +# Interlimb Circuit # +##################### +def interlimb_circuit( + network_options: options.NetworkOptions, + sides: List[str], + limbs: List[str], + transform_mat=np.identity(3) +): + for side in sides: + for limb in limbs: + commissural_offset_x, commissural_offset_y = 5.0, 2.5 # Independent offsets + off_x = -commissural_offset_x if side == "left" else commissural_offset_x + off_y = (commissural_offset_y + 20) if limb == "fore" else commissural_offset_y + mirror_x = limb == "hind" + mirror_y = side == "right" + commissural = Commissural( + name=join_str((side, limb)), transform_mat=( + transform_mat@get_translation_matrix(off_x=off_x, off_y=off_y)@get_mirror_matrix( + mirror_x=mirror_y, mirror_y=mirror_y + ) + ) + ) + network_options.add_nodes(commissural.nodes().values()) + network_options.add_edges(commissural.edges().values()) + + lpsn_x_offset = 25.0 + lpsn_y_offset = 20 - 5.5 # Adjusted relative to base fore_y_offset + off_x = -lpsn_x_offset if side == "left" else lpsn_x_offset + off_y = lpsn_y_offset + mirror_y = side == "right" + lpsn = LPSN( + name=side, + transform_mat=( + transform_mat@get_translation_matrix(off_x=off_x, off_y=off_y)@get_mirror_matrix( + mirror_x=False, mirror_y=mirror_y + ) + ) + ) + network_options.add_nodes(lpsn.nodes().values()) + network_options.add_edges(lpsn.edges().values()) + + return network_options + + +###################### +# Vestibular Circuit # +###################### +def vestibular_circuit( + network_options: options.NetworkOptions, + position=True, + velocity=True, + transform_mat=np.identity(3), +): + rates = ("position", "velocity",) + axes = ("roll", "pitch",) + # Define base offsets for positioning + base_x_offset = -15.0 + base_y_offset = 0.0 + spacing_x = 10.0 # Horizontal spacing between systems + spacing_y = 5.0 # Vertical spacing between systems + for side_index, side in enumerate(("left", "right",)): + for rate_index, rate in enumerate(rates): + for axis_index, axis in enumerate(axes): + # Calculate unique offsets for 
each system + off_x = base_x_offset + axis_index * spacing_x # Axes placed next to each other + off_y = base_y_offset + rate_index * spacing_y # Rates placed below each other + + mirror_x = False + if rate == "position": + mirror_x = True + if side == "right": + off_x = -1*off_x + vestibular = VestibularSystem( + side=side, + rate=rate, + axis=axis, + transform_mat=transform_mat@get_translation_matrix( + off_x=off_x, off_y=off_y + )@get_mirror_matrix(mirror_x=mirror_x, mirror_y=False) + ) + network_options.add_nodes(vestibular.nodes().values()) + network_options.add_edges(vestibular.edges().values()) + + return network_options + + +################## +# BrainStemDrive # +################## +def brain_stem_circuit( + network_options: options.NetworkOptions, + transform_mat=np.identity(3), +): + # Brain stem global drive(alpha) + brain_stem = BrainStemDrive( + transform_mat=transform_mat + ) + network_options.add_nodes(brain_stem.nodes().values()) + network_options.add_edges(brain_stem.edges().values()) + + edge_specs = [] + for node in network_options.nodes: + if ("DR" in node.name) and ("BS_DR" not in node.name) and node.model == "linear": + edge_specs.append( + ( + ("BS", "DR"), + (node.name,), + 1.0, + "excitatory", + ) + ) + edges = create_edges(edge_specs, base_name="") + return edges + + +##################### +# Quadruped Circuit # +##################### +def quadruped_circuit( + network_options: options.NetworkOptions, + transform_mat=np.identity(3) +): + """ Full Quadruped Circuit """ + + # read muscle config file + muscles_config_path = "/Users/tatarama/projects/work/research/neuromechanics/quadruped/mice/mouse-locomotion/data/config/muscles/quadruped_siggraph.yaml" + muscles = generate_muscle_agonist_antagonist_pairs(muscles_config_path) + + # Limb circuitry + network_options = limb_circuit( + network_options, + side="left", + limb="hind", + muscles=muscles, + contacts=("PHALANGE",), + transform_mat=get_translation_matrix( + off_x=-25.0, off_y=0.0 + )@get_mirror_matrix( + mirror_x=True, mirror_y=False + )@get_rotation_matrix(angle=45) + ) + + network_options = limb_circuit( + network_options, + side="right", + limb="hind", + muscles=muscles, + contacts=("PHALANGE",), + transform_mat=get_translation_matrix( + off_x=25.0, off_y=0.0 + )@get_mirror_matrix( + mirror_x=True, mirror_y=False + )@get_rotation_matrix(angle=-45) + ) + + network_options = limb_circuit( + network_options, + side="left", + limb="fore", + muscles=muscles, + contacts=("PHALANGE",), + transform_mat=get_translation_matrix( + off_x=-25.0, off_y=25.0 + )@get_rotation_matrix(angle=45) + ) + + network_options = limb_circuit( + network_options, + side="right", + limb="fore", + muscles=muscles, + contacts=("PHALANGE",), + transform_mat=get_translation_matrix( + off_x=25.0, off_y=25.0 + )@get_rotation_matrix(angle=-45) + ) + + # Commisural + network_options = interlimb_circuit( + network_options, + sides=("left", "right"), + limbs=("hind", "fore",), + ) + + ################################# + # Connect rhythm to commissural # + ################################# + rg_commissural_edges = connect_rg_commissural() + network_options.add_edges(rg_commissural_edges.values()) + + ################################## + # Connect pattern to commissural # + ################################## + pattern_commissural_edges = connect_pattern_commissural() + # network_options.add_edges(pattern_commissural_edges.values()) + + ############################## + # Connect fore and hind lpsn # + ############################## + fore_hind_edges = 
connect_fore_hind_circuits() + network_options.add_edges(fore_hind_edges.values()) + + #################### + # VestibularSystem # + #################### + network_options = vestibular_circuit(network_options) + for side in ('left', 'right'): + vn_edges = connect_pitch_vestibular_to_mn( + side=side, + fore_muscles=[ + *muscles[side]['fore']['agonist'], + *muscles[side]['fore']['antagonist'] + ], + hind_muscles=[ + *muscles[side]['hind']['agonist'], + *muscles[side]['hind']['antagonist'] + ], + default_weight=0.1 + ) + network_options.add_edges(vn_edges.values()) + + vn_edges = connect_roll_vestibular_to_mn( + side=side, + fore_muscles=[ + *muscles['left']['fore']['agonist'], + *muscles['left']['fore']['antagonist'] + ], + hind_muscles=[ + *muscles['left']['hind']['agonist'], + *muscles['left']['hind']['antagonist'] + ], + default_weight=0.1 + ) + network_options.add_edges(vn_edges.values()) + + edges = brain_stem_circuit( + network_options, + transform_mat=get_translation_matrix(off_x=0.0, off_y=40.0) + ) + network_options.add_edges(edges.values()) + + return network_options diff --git a/examples/mouse/run.py b/examples/mouse/run.py new file mode 100644 index 0000000..63747e0 --- /dev/null +++ b/examples/mouse/run.py @@ -0,0 +1,797 @@ +""" Generate and reproduce Zhang, Shevtsova, et al. eLife 2022;11:e73424. DOI: +https://doi.org/10.7554/eLife.73424 paper network """ + +import numpy.matlib as npml +import seaborn as sns +from farms_core.io.yaml import read_yaml +from farms_core.utils import profile +from farms_network.core import options +from farms_network.core.network import Network +from farms_network.numeric.integrators_cy import RK4Solver +from scipy.integrate import ode +from tqdm import tqdm + +from components import * +from components import limb_circuit + + +def generate_network(n_iterations: int): + """Generate network""" + + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "mouse"}, + integration=options.IntegrationOptions.defaults( + n_iterations=n_iterations, + timestep=1.0, + ), + logs=options.NetworkLogOptions( + n_iterations=n_iterations, + ), + ) + + ############## + # MotorLayer # + ############## + # read muscle config file + muscles_config = read_yaml( + "/Users/tatarama/projects/work/research/neuromechanics/quadruped/mice/mouse-locomotion/data/config/muscles/quadruped_siggraph.yaml" + ) + + def update_muscle_name(name: str) -> str: + """Update muscle name format""" + return name.replace("_", "-") + + muscles = { + "left": { + "hind": {"agonist": [], "antagonist": []}, + "fore": {"agonist": [], "antagonist": []}, + }, + "right": { + "hind": {"agonist": [], "antagonist": []}, + "fore": {"agonist": [], "antagonist": []}, + }, + } + + for name, muscle in muscles_config["muscles"].items(): + side = muscle["side"] + limb = muscle["limb"] + function = muscle.get("function", "agonist") + muscles[side][limb][function].append( + { + "name": join_str(name.split("_")[2:]), + "type": muscle["type"], + "abbrev": muscle["abbrev"], + } + ) + + ################################### + # Connect patterns and motorlayer # + ################################### + hind_muscle_patterns = { + "bfa": ["EA", "EB"], + "ip": ["FA", "FB"], + "bfpst": ["FA", "EA", "FB", "EB"], + "rf": ["EA", "FB", "EB"], + "va": ["EA", "FB", "EB"], + "mg": ["FA", "EA", "EB"], + "sol": ["EA", "EB"], + "ta": ["FA", "FB"], + "ab": ["FA", "EA", "FB", "EB"], + "gm_dorsal": ["FA", "EA", "FB", "EB"], + "edl": ["FA", "EA", "FB", "EB"], + "fdl": ["FA", "EA", "FB", 
"EB"], + } + + fore_muscle_patterns = { + "spd": ["FA", "EA", "FB", "EB"], + "ssp": ["FA", "EA", "FB", "EB"], + "abd": ["FA", "EA", "FB", "EB"], + "add": ["FA", "EA", "FB", "EB"], + "tbl": ["FA", "EA", "FB", "EB"], + "tbo": ["FA", "EA", "FB", "EB"], + "bbs": ["FA", "FB"], + "bra": ["FA", "EA", "FB", "EB"], + "ecu": ["FA", "EA", "FB", "EB"], + "fcu": ["FA", "EA", "FB", "EB"], + } + + # Generate rhythm centers + scale = 1.0 + for side in ("left", "right"): + for limb in ("fore", "hind"): + # Rhythm + rg_x, rg_y = 10.0, 7.5 + off_x = -rg_x if side == "left" else rg_x + off_y = rg_y if limb == "fore" else -rg_y + mirror_x = limb == "hind" + mirror_y = side == "right" + rhythm = RhythmGenerator( + name=join_str((side, limb)), + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((rhythm.nodes()).values()) + network_options.add_edges((rhythm.edges()).values()) + # Commissural + comm_x, comm_y = rg_x - 7.0, rg_y + 0.0 + off_x = -comm_x if side == "left" else comm_x + off_y = comm_y if limb == "fore" else -comm_y + mirror_x = limb == "hind" + mirror_y = side == "right" + commissural = Commissural( + name=join_str((side, limb)), + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((commissural.nodes()).values()) + # Drive + commissural_drive = CommissuralDrive( + name=join_str((side, limb)), + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((commissural_drive.nodes()).values()) + # Pattern + pf_x, pf_y = rg_x + 0.0, rg_y + 7.5 + off_x = -pf_x if side == "left" else pf_x + off_y = pf_y if limb == "fore" else -pf_y + mirror_x = limb == "hind" + mirror_y = side == "right" + pattern = PatternFormation( + name=join_str((side, limb)), + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((pattern.nodes()).values()) + network_options.add_edges((pattern.edges()).values()) + + rhythm_pattern_edges = connect_rhythm_pattern(base_name=join_str((side, limb))) + network_options.add_edges(rhythm_pattern_edges.values()) + + # Motor Layer + motor_x = pf_x + 0.5 * max( + len(muscles["left"][limb]["agonist"]), + len(muscles["left"][limb]["antagonist"]), + ) + motor_y = pf_y + 5.0 + + # Determine the mirror_x and mirror_y flags based on side and limb + mirror_x = True if limb == "hind" else False + mirror_y = True if side == "right" else False + + # Create MotorLayer for each side and limb + motor = MotorLayer( + muscles=muscles[side][limb], + name=join_str((side, limb)), + transform_mat=get_transform_mat( + angle=0.0, + off_x=motor_x if side == "right" else -motor_x, + off_y=motor_y if limb == "fore" else -motor_y, + mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((motor.nodes()).values()) + network_options.add_edges((motor.edges()).values()) + # LPSN + lpsn_x = rg_x - 9.0 + lpsn_y = rg_y - 5.5 + off_x = -lpsn_x if side == "left" else lpsn_x + off_y = lpsn_y + mirror_y = side == "right" + lpsn = LPSN( + name=side, + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((lpsn.nodes()).values()) + lpsn_drive = LPSNDrive( + name=side, + transform_mat=get_transform_mat( + angle=0, + off_x=off_x, + off_y=off_y, + 
mirror_x=mirror_x, + mirror_y=mirror_y, + ), + ) + network_options.add_nodes((lpsn_drive.nodes()).values()) + + # Connect pattern layer to motor layer + for muscle, patterns in hind_muscle_patterns.items(): + pattern_edges = connect_pattern_motor_layer( + base_name=join_str((side, "hind")), muscle=muscle, patterns=patterns + ) + network_options.add_edges(pattern_edges.values()) + for muscle, patterns in fore_muscle_patterns.items(): + pattern_edges = connect_pattern_motor_layer( + base_name=join_str((side, "fore")), muscle=muscle, patterns=patterns + ) + network_options.add_edges(pattern_edges.values()) + + ################################# + # Connect rhythm to commissural # + ################################# + rg_commissural_edges = connect_rg_commissural() + network_options.add_edges(rg_commissural_edges.values()) + + ############################## + # Connect fore and hind lpsn # + ############################## + fore_hind_edges = connect_fore_hind_circuits() + network_options.add_edges(fore_hind_edges.values()) + + edge_specs = [] + + for side in ("left", "right"): + for limb in ("fore", "hind"): + edge_specs.extend([ + ((side, limb, "RG", "F", "DR"), (side, limb, "RG", "F"), 1.0, "excitatory"), + ((side, limb, "RG", "E", "DR"), (side, limb, "RG", "E"), 1.0, "excitatory"), + ((side, limb, "V0V", "DR"), (side, limb, "V0V"), -1.0, "inhibitory"), + ((side, limb, "V0D", "DR"), (side, limb, "V0D"), -1.0, "inhibitory"), + ]) + + # Add the diagonal V0D connection + edge_specs.append( + ((side, "V0D", "diag", "DR"), (side, "V0D", "diag"), -1.0, "inhibitory") + ) + + # Create the edges using create_edges + edges = create_edges( + edge_specs, + base_name="", + visual_options=options.EdgeVisualOptions() + ) + network_options.add_edges(edges.values()) + + return network_options + + +def generate_limb_circuit(n_iterations: int): + """ Generate limb circuit """ + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "mouse"}, + integration=options.IntegrationOptions.defaults( + n_iterations=n_iterations, + timestep=1.0, + ), + logs=options.NetworkLogOptions( + n_iterations=n_iterations, + ), + ) + + ############## + # MotorLayer # + ############## + # read muscle config file + muscles_config = read_yaml( + "/Users/tatarama/projects/work/research/neuromechanics/quadruped/mice/mouse-locomotion/data/config/muscles/quadruped_siggraph.yaml" + ) + + ################################### + # Connect patterns and motorlayer # + ################################### + hind_muscle_patterns = { + "bfa": ["EA", "EB"], + "ip": ["FA", "FB"], + "bfpst": ["FA", "EA", "FB", "EB"], + "rf": ["EA", "FB", "EB"], + "va": ["EA", "FB", "EB"], + "mg": ["FA", "EA", "EB"], + "sol": ["EA", "EB"], + "ta": ["FA", "FB"], + "ab": ["FA", "EA", "FB", "EB"], + "gm_dorsal": ["FA", "EA", "FB", "EB"], + "edl": ["FA", "EA", "FB", "EB"], + "fdl": ["FA", "EA", "FB", "EB"], + } + + fore_muscle_patterns = { + "spd": ["FA", "EA", "FB", "EB"], + "ssp": ["FA", "EA", "FB", "EB"], + "abd": ["FA", "EA", "FB", "EB"], + "add": ["FA", "EA", "FB", "EB"], + "tbl": ["FA", "EA", "FB", "EB"], + "tbo": ["FA", "EA", "FB", "EB"], + "bbs": ["FA", "FB"], + "bra": ["FA", "EA", "FB", "EB"], + "ecu": ["FA", "EA", "FB", "EB"], + "fcu": ["FA", "EA", "FB", "EB"], + } + + def update_muscle_name(name: str) -> str: + """Update muscle name format""" + return name.replace("_", "-") + + muscles = { + "left": { + "hind": {"agonist": [], "antagonist": []}, + "fore": {"agonist": [], "antagonist": []}, + }, + "right": 
{ + "hind": {"agonist": [], "antagonist": []}, + "fore": {"agonist": [], "antagonist": []}, + }, + } + + for name, muscle in muscles_config["muscles"].items(): + side = muscle["side"] + limb = muscle["limb"] + function = muscle.get("function", "agonist") + muscles[side][limb][function].append( + { + "name": join_str(name.split("_")[2:]), + "type": muscle["type"], + "abbrev": muscle["abbrev"], + } + ) + + network_options = limb_circuit( + network_options, + side="right", + limb="hind", + muscles=muscles, + contacts=("PHALANGE",), + transform_mat=get_translation_matrix(off_x=-25.0, off_y=0.0) + ) + + return network_options + + +def generate_quadruped_circuit( + n_iterations: int +): + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "quadruped"}, + integration=options.IntegrationOptions.defaults( + n_iterations=int(n_iterations), + timestep=1.0, + ), + logs=options.NetworkLogOptions( + buffer_size=int(n_iterations), + ), + ) + network_options = quadruped_circuit(network_options) + return network_options + + +def run_network(*args): + network_options = args[0] + + network = Network.from_options(network_options) + iterations = network_options.integration.n_iterations + timestep = network_options.integration.timestep + network.setup_integrator() + + integrator = ode(network.get_ode_func()).set_integrator( + u'dopri5', + method=u'adams', + max_step=0.0, + # nsteps=0 + ) + nnodes = len(network_options.nodes) + integrator.set_initial_value(np.zeros(len(network.data.states.array[:]),), 0.0) + + # print("Data ------------", np.array(network.network.data.states.array)) + + # data.to_file("/tmp/sim.hdf5") + + # # Integrate + states = np.ones((iterations, len(network.data.states.array[:])))*1.0 + states_tmp = np.zeros((len(network.data.states.array[:],))) + outputs = np.ones((iterations, len(network.data.outputs.array[:])))*1.0 + # states[0, 2] = -1.0 + + # for index, node in enumerate(network_options.nodes): + # print(index, node.name) + # network.data.external_inputs.array[:] = np.ones((1,))*(iteration/iterations)*1.0 + drive_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "BS_input" in node.name and node.model == "relay" + ] + inputs = np.zeros(np.shape(network.data.external_inputs.array[:])) + + # Network drive : Alpha + time_vec = np.arange(0, iterations)*timestep + drive = 1.0 + drive_vec = np.hstack( + (np.linspace(0, 1.05, len(time_vec[::2])), + np.linspace(1.05, 0, len(time_vec[::2]))) + ) + + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + time = iteration + # network.step(network.ode, iteration*1e-3, network.data.states.array) + # network.step() + # states[iteration+1, :] = network.data.states.array + # network.step() + # network.evaluate(iteration*1e-3, states[iteration, :]) + + network.data.nodes['BS_input'].external_input.values = drive_vec[iteration]*drive + + network.step(time) + network.update_logs(time) + # integrator.set_initial_value(integrator.y, integrator.t) + # integrator.integrate(integrator.t+1.0) + # network.data.states.array[:] = integrator.y + # outputs[iteration, :] = network.data.outputs.array + # states[iteration, :] = integrator.y# network.data.states.array + # network._network_cy.update_iteration() + + + # # Integrate + # N_ITERATIONS = network_options.integration.n_iterations + # # states = np.ones((len(network.data.states.array),)) * 1.0 + + # # network_gui = NetworkGUI(data=data) + # # network_gui.run() + + # inputs_view = 
network.data.external_inputs.array + # drive_input_indices = [ + # index + # for index, node in enumerate(network_options.nodes) + # if "DR" in node.name and node.model == "linear" + # ] + # inputs = np.zeros((len(inputs_view),)) + # for iteration in tqdm(range(0, N_ITERATIONS), colour="green", ascii=" >="): + # inputs[drive_input_indices] = 0.02 + # inputs_view[:] = inputs + # # states = rk4(iteration * 1e-3, states, network.ode, step_size=1) + # # states = network.integrator.step(network, iteration * 1e-3, states) + # network.step() + # # states = network.ode(iteration*1e-3, states) + # # print(np.array(states)[0], network.data.states.array[0], network.data.derivatives.array[0]) + # network.data.times.array[iteration] = iteration*1e-3 + # # network.logging(iteration) + + # network.data.to_file("/tmp/network.h5") + network_options.save("/tmp/network_options.yaml") + + return network + + +def plot_network(network_options): + """ Plot only network """ + + network_options = update_edge_visuals(network_options) + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + link="edges", + name="name", + source="source", + target="target", + ) + + # plt.figure() + # sparse_array = nx.to_scipy_sparse_array(graph) + # sns.heatmap( + # sparse_array.todense()[50:75, 50:75], cbar=False, square=True, + # linewidths=0.5, + # annot=True + # ) + plt.figure() + pos_circular = nx.circular_layout(graph) + pos_spring = nx.spring_layout(graph) + pos_graphviz = nx.nx_agraph.pygraphviz_layout(graph) + + _ = nx.draw_networkx_nodes( + graph, + pos={ + node: data["visual"]["position"][:2] for node, data in graph.nodes.items() + }, + node_color=[data["visual"]["color"] for node, data in graph.nodes.items()], + alpha=0.25, + edgecolors="k", + linewidths=2.0, + node_size=[300*data["visual"]["radius"] for node, data in graph.nodes.items()], + ) + nx.draw_networkx_labels( + graph, + pos={ + node: data["visual"]["position"][:2] for node, data in graph.nodes.items() + }, + labels={node: data["visual"]["label"] for node, data in graph.nodes.items()}, + font_size=11.0, + font_weight="bold", + font_family="sans-serif", + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos={ + node: data["visual"]["position"][:2] + for node, data in graph.nodes.items() + }, + edge_color=[ + [0.3, 1.0, 0.3] if data["type"] == "excitatory" else [0.7, 0.3, 0.3] + for edge, data in graph.edges.items() + ], + width=1.0, + arrowsize=10, + style="-", + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle=[ + data["visual"]["connectionstyle"] + for edge, data in graph.edges.items() + ], + ) + plt.show() + + +def get_gait_plot_from_neuron_act(act): + """ Get start and end times of neurons for gait plot. """ + act = np.reshape(act, (np.shape(act)[0], 1)) + act_binary = (np.array(act) > 0.1).astype(int) + act_binary = np.logical_not(act_binary).astype(int) + act_binary[0] = 0 + gait_cycle = [] + start = (np.where(np.diff(act_binary[:, 0]) == 1.))[0] + end = (np.where(np.diff(act_binary[:, 0]) == -1.))[0] + for id, val in enumerate(start[:len(end)]): + # HARD CODED TIME SCALING HERE!! 
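# The 0.001 factor converts iteration indices to seconds, assuming the 1 ms
# integration timestep used throughout these examples; broken_barh expects
# (start, duration) pairs, hence the "end - start" second element.
# Illustrative worked example of the detection above: for
# act = [0.3, 0.05, 0.02, 0.4, 0.5] and the 0.1 threshold, the inverted
# binary trace is [0, 1, 1, 0, 0], np.diff gives [1, 0, -1, 0], so
# start = [0] and end = [2], and the appended bar is (0.000, 0.002),
# i.e. start 0.0 s with a 2 ms duration.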
+ gait_cycle.append((val*0.001, end[id]*0.001 - val*0.001)) + return gait_cycle + + +def calc_on_offsets(time_vec, out): + os_=((np.diff((out>0.1).astype(np.int64),axis=0)==1).T) + of_=((np.diff((out>0.1).astype(np.int64),axis=0)==-1).T) + onsets=npml.repmat(time_vec[:-1],out.shape[1],1)[os_] + offsets=npml.repmat(time_vec[:-1],out.shape[1],1)[of_] + leg_os=(npml.repmat(np.arange(out.shape[1]),len(time_vec)-1,1).T)[os_] + leg_of=(npml.repmat(np.arange(out.shape[1]),len(time_vec)-1,1).T)[of_] + + times_os=np.stack((onsets,leg_os,np.arange(len(leg_os))),1) + times_os=times_os[times_os[:,0].argsort()] + times_of=np.stack((offsets,leg_of,np.arange(len(leg_of))),1) + times_of=times_of[times_of[:,0].argsort()] + + times = np.concatenate(( + np.concatenate((times_os,np.ones((len(times_os),1))*0.0),1), + np.concatenate((times_of,np.ones((len(times_of),1))*1.0),1))) + times=times[times[:,0].argsort()] + return times + + +def calc_phase(time_vec, out, phase_diffs): + times = calc_on_offsets(time_vec,out) + ref_onsets = times[np.logical_and(times[:,1]==0,times[:,3]==0)][:,0] + phase_dur=np.append(ref_onsets[1:]-ref_onsets[:-1],np.nan) + + p = times[times[:,1]==0] + indices = np.where(np.diff(p[:,3])==1) + fl_phase_dur = np.zeros((len(ref_onsets))) + fl_phase_dur[:] = np.nan + fl_phase_dur[p[indices,2].astype(int)] = p[[ind+1 for ind in indices],0] - p[indices,0] + ex_phase_dur = phase_dur-fl_phase_dur + + M = np.zeros((len(ref_onsets),out.shape[1])) + M[:] = np.nan + M[:,0]=ref_onsets + + + for i in range(1,out.shape[1]): + p = times[np.logical_and((times[:,1]==0) | (times[:,1]==i),times[:,3]==0)] + indices = np.where(np.diff(p[:,1])==i) + M[p[indices,2].astype(int),i] = p[[ind+1 for ind in indices],0] + + + phases=np.zeros((len(ref_onsets),len(phase_diffs))) + for i,(x,y) in enumerate(phase_diffs): + phases[:,i] = ((M[:,y]-M[:,x])/phase_dur) % 1.0 + + if phases.shape[0]!=0: + no_nan = ~np.isnan(np.concatenate( + (np.stack((phase_dur,fl_phase_dur,ex_phase_dur),1),phases),1 + )).any(axis=1) + return (phase_dur[no_nan],fl_phase_dur[no_nan],ex_phase_dur[no_nan],phases[no_nan],ref_onsets[no_nan]) + else: + return (phase_dur,fl_phase_dur,ex_phase_dur,phases,ref_onsets[:-1]) + + +def plot_analysis(network: Network, network_options): + """ Plot analysis """ + plot_names = [ + 'right_fore_RG_F', + 'left_fore_RG_F', + 'right_hind_RG_F', + 'left_hind_RG_F', + ] + + plot_traces = [ + network.log.nodes[name].output.values for name in plot_names + ] + + _split_ramp = int(len(network.log.times.array)/2) + phases_up = calc_phase( + network.log.times.array[:_split_ramp], + (np.asarray(plot_traces[:4]).T)[:_split_ramp], + ((3, 2), (1, 0), (3, 1), (3, 0)) + ) + phases_down = calc_phase( + network.log.times.array[_split_ramp:], + (np.asarray(plot_traces[:4]).T)[_split_ramp:], + ((3, 2), (1, 0), (3, 1), (3, 0)) + ) + + alpha_vec = np.array(network.log.nodes["BS_input"].output.values) + + fig, ax = plt.subplots(4, 1, sharex='all') + for j in range(4): + ax[j].plot(alpha_vec[np.int32(phases_up[4])], phases_up[3][:, j], 'b*') + ax[j].plot(alpha_vec[np.int32(phases_down[4])], phases_down[3][:, j], 'r*') + + fig, ax = plt.subplots(len(plot_names)+2, 1, sharex='all') + #fig.canvas.set_window_title('Model Performance') + fig.suptitle('Model Performance', fontsize=12) + time_vec = np.array(network.log.times.array) + for i, tr in enumerate(plot_traces): + ax[i].plot(time_vec*0.001, np.array(tr), 'b', linewidth=1) + ax[i].grid('on', axis='x') + ax[i].set_ylabel(plot_names[i], fontsize=10) + ax[i].set_yticks([0, 1]) + + _width = 
0.2 + colors = ['blue', 'green', 'red', 'black'] + for i, tr in enumerate(plot_traces): + if i > 3: + break + ax[len(plot_names)].broken_barh(get_gait_plot_from_neuron_act(tr), + (1.6-i*0.2, _width), facecolors=colors[i]) + + ax[len(plot_names)].broken_barh(get_gait_plot_from_neuron_act(plot_traces[3]), + (1.0, _width*4), facecolors=(0.2, 0.2, 0.2), alpha=0.5) + ax[len(plot_names)].set_ylim(1.0, 1.8) + ax[len(plot_names)].set_xlim(0) + ax[len(plot_names)].set_xlabel('Time') + ax[len(plot_names)].set_yticks([1.1, 1.3, 1.5, 1.7]) + ax[len(plot_names)].set_yticklabels(['RF', 'LF', 'RH', 'LH']) + ax[len(plot_names)].grid(True) + + ax[len(plot_names)+1].fill_between(time_vec*0.001, 0, alpha_vec, + color=(0.2, 0.2, 0.2), alpha=0.5) + ax[len(plot_names)+1].grid('on', axis='x') + ax[len(plot_names)+1].set_ylabel('ALPHA') + ax[len(plot_names)+1].set_xlabel('Time [s]') + + plt.show() + + +def plot_data(network, network_options): + plot_nodes = [ + index + for index, node in enumerate(network.data.nodes) + if ("RG_F" in node.name) and ("DR" not in node.name) + ] + + plt.figure() + + for index, node_index in enumerate(plot_nodes): + plt.fill_between( + np.array(network.log.times.array)*1e-3, + index + np.array(network.log.nodes[node_index].output.values), + index, + alpha=0.2, + lw=1.0, + ) + plt.plot( + np.array(network.log.times.array)*1e-3, + index + np.array(network.log.nodes[node_index].output.values), + label=network.log.nodes[node_index].name, + ) + plt.legend() + + plot_nodes = [ + index + for index, node in enumerate(network.log.nodes) + if ("Mn" in node.name) + ] + plt.figure() + for index, node_index in enumerate(plot_nodes): + plt.fill_between( + np.array(network.log.times.array)*1e-3, + index + np.array(network.log.nodes[node_index].output.values), + index, + alpha=0.2, + lw=1.0, + ) + plt.plot( + np.array(network.log.times.array)*1e-3, + index + np.array(network.log.nodes[node_index].output.values), + label=network.log.nodes[node_index].name, + ) + plt.legend() + plt.show() + + +def main(): + """Main.""" + + # Generate the network + # network_options = generate_network(int(1e4)) + # network_options = generate_limb_circuit(int(5e4)) + network_options = generate_quadruped_circuit((1e3)) + + # plot_network(network_options) + network = run_network(network_options) + plot_data(network, network_options) + plot_analysis(network, network_options) + + + # from abstract_control.control.generate import quadruped_siggraph_network + # from copy import deepcopy + # og_graph = quadruped_siggraph_network() + + # def update_names(old_names): + # replace_names = { + # "IIIn": "II_In", + # "IbIn": "Ib_In", + # "IaIn": "Ia_In", + # "_motor": "", + # } + # new_names = {} + # for name in old_names: + # new_name = deepcopy(name) + # for old, new in replace_names.items(): + # new_name = new_name.replace(old, new) + # new_names[name] = new_name + # return new_names + + # new_names = update_names(og_graph.nodes) + # og_graph = nx.relabel_nodes(og_graph, mapping=new_names) + + # print(f" OG edges {len(og_graph.edges)}") + # print(f" new edges {len(graph.edges)}") + + # check_edges = 0 + # for edge in graph.edges(): + # if edge in og_graph.edges: + # pass + # else: + # check_edges += 1 + # print(f"{edge} not found...") + # print(f"Check edges {check_edges}") + + +if __name__ == "__main__": + profile.profile(main, profile_filename="/tmp/network.prof") + # main() diff --git a/examples/rhythm_generator/run.py b/examples/rhythm_generator/run.py new file mode 100644 index 0000000..503c41d --- /dev/null +++ 
b/examples/rhythm_generator/run.py @@ -0,0 +1,743 @@ +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +import seaborn as sns +from typing import Iterable, List +from farms_core.io.yaml import read_yaml +from farms_core.utils import profile +from farms_network.core import options +from farms_network.core.data import NetworkData +from farms_network.core.network import Network +from farms_network.numeric.integrators_cy import RK4Solver +from scipy.integrate import ode +from tqdm import tqdm + +plt.rcParams['text.usetex'] = False + + +def calculate_arc_rad(source_pos, target_pos, base_rad=-0.1): + """Calculate arc3 radius for edge based on node positions.""" + dx = target_pos[0] - source_pos[0] + dy = target_pos[1] - source_pos[1] + + # Set curvature to zero if nodes are aligned horizontally or vertically + if dx == 0 or dy == 0: + return 0.0 + + # Decide on curvature based on position differences + if abs(dx) > abs(dy): + # Horizontal direction - positive rad for up, negative for down + return -base_rad if dy >= 0 else base_rad + else: + # Vertical direction - positive rad for right, negative for left + return base_rad if dx >= 0 else base_rad + + +def update_edge_visuals(network_options): + """ Update edge options """ + + nodes = network_options.nodes + edges = network_options.edges + for edge in edges: + base_rad = calculate_arc_rad( + nodes[nodes.index(edge.source)].visual.position, + nodes[nodes.index(edge.target)].visual.position, + ) + edge.visual.connectionstyle = f"arc3,rad={base_rad*0.0}" + return network_options + + +def join_str(strings): + return "_".join(filter(None, strings)) + + +def multiply_transform(vec: np.ndarray, transform_mat: np.ndarray) -> np.ndarray: + """ + Multiply a 2D vector with a 2D transformation matrix (3x3). + + Parameters: + vec (np.ndarray): A 2D vector (shape (2,) or (3,)) + transform_mat (np.ndarray): A 3x3 transformation matrix. + + Returns: + np.ndarray: The transformed vector. + """ + + assert transform_mat.shape == (3, 3), "Transformation matrix must be 3x3" + + # Ensure vec is in homogeneous coordinates (i.e., 3 elements). 
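# For example, padding (x, y) to (x, y, 1) lets a pure translation be a
# single matrix product:
#   [[1, 0, tx],    [[x],    [[x + tx],
#    [0, 1, ty],  @  [y],  =  [y + ty],
#    [0, 0,  1]]     [1]]     [     1]]
# which is why the 2-element case below appends a trailing 1.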
+ if vec.shape == (2,): + vec = np.append(vec, 1) + elif vec.shape != (3,): + raise ValueError("Input vector must have shape (2,) or (3,)") + + # Perform the multiplication + return transform_mat @ vec + + +def get_scale_matrix(scale: float) -> np.ndarray: + """Return a scaling matrix.""" + return np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]) + + +def get_mirror_matrix(mirror_x: bool = False, mirror_y: bool = False) -> np.ndarray: + """Return a mirror matrix based on the mirror flags.""" + mirror_matrix = np.identity(3) + if mirror_x: + mirror_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]) + if mirror_y: + mirror_matrix = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) + return mirror_matrix + + +def get_translation_matrix(off_x: float, off_y: float) -> np.ndarray: + """Return a translation matrix.""" + return np.array([[1, 0, off_x], [0, 1, off_y], [0, 0, 1]]) + + +def get_rotation_matrix(angle: float) -> np.ndarray: + """Return a rotation matrix for the given angle in degrees.""" + angle_rad = np.radians(angle) + return np.array( + [ + [np.cos(angle_rad), -np.sin(angle_rad), 0], + [np.sin(angle_rad), np.cos(angle_rad), 0], + [0, 0, 1], + ] + ) + + +def get_transform_mat( + angle: float, + off_x: float, + off_y: float, + mirror_x: bool = False, + mirror_y: bool = False, + scale: float = 2.5, +) -> np.ndarray: + """Return a complete transformation matrix based on input parameters.""" + scale_matrix = get_scale_matrix(scale) + mirror_matrix = get_mirror_matrix(mirror_x, mirror_y) + translation_matrix = get_translation_matrix(off_x, off_y) + rotation_matrix = get_rotation_matrix(angle) + + # Combine the transformations in the correct order: translation -> rotation -> mirror -> scale + transform_matrix = translation_matrix @ rotation_matrix @ mirror_matrix + transform_matrix = scale_matrix @ transform_matrix + + return transform_matrix + + +def create_node( + base_name: str, + node_id: str, + node_type: str, + position_vec: np.ndarray, + label: str, + color: list, + transform_mat: np.ndarray, + states: dict, + parameters: dict, +) -> options.LIDannerNodeOptions: + """ + Function to create a node with visual and state options. + + Parameters: + base_name (str): The base name to prepend to node_id. + node_id (str): Unique identifier for the node. + position_vec (np.ndarray): The position of the node. + label (str): The visual label for the node. + color (list): RGB color values for the node. + node_type (str): Type of the node ('LINaPDanner' or 'LIDanner'). + transform_mat (np.ndarray): Transformation matrix for positioning. + v0 (float): Initial value for the state option 'v0'. + h0 (float, optional): Initial value for the state option 'h0', only used for some node types. + + Returns: + options.LIDannerNodeOptions: The configured node options object. 
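
    Example (illustrative sketch; the node_type, position, label, color and
    states values mirror the RhythmGenerator specs later in this file, while
    the base_name is hypothetical):

    >>> node = create_node(
    ...     base_name="left_hind",
    ...     node_id=join_str(("RG", "F")),
    ...     node_type="LINaPDanner",
    ...     position_vec=np.array((3.0, 0.0)),
    ...     label="F",
    ...     color=[1.0, 0.0, 0.0],
    ...     transform_mat=np.identity(3),
    ...     states={"v": -62.5, "h": 0.5},
    ...     parameters={},
    ... )
    >>> node.name
    'left_hind_RG_F'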
+ """ + # Generate the full name and position + full_name = join_str((base_name, node_id)) + position = multiply_transform(position_vec, transform_mat).tolist() + + # Determine node type and state options + visual_options = options.NodeVisualOptions( + position=position, + label=label, + color=color, + ) + if node_type == "LINaPDanner": + state_options = options.LINaPDannerStateOptions(list(states.values())) + parameters = options.LINaPDannerNodeParameterOptions.defaults(**parameters) + noise = options.OrnsteinUhlenbeckOptions.defaults() + node_options_class = options.LINaPDannerNodeOptions + elif node_type == "LIDanner": + state_options = options.LIDannerStateOptions(list(states.values())) + parameters = options.LIDannerNodeParameterOptions.defaults(**parameters) + noise = options.OrnsteinUhlenbeckOptions.defaults() + node_options_class = options.LIDannerNodeOptions + elif node_type == "Linear": + state_options = None + parameters = options.LinearParameterOptions.defaults(**parameters) + noise = None + node_options_class = options.LinearNodeOptions + elif node_type == "ReLU": + state_options = None + parameters = options.ReLUParameterOptions.defaults(**parameters) + noise = None + node_options_class = options.ReLUNodeOptions + elif node_type == "ExternalRelay": + state_options = None + parameters = options.NodeParameterOptions() + noise = None + visual_options.radius = 0.0 + node_options_class = options.RelayNodeOptions + else: + raise ValueError(f"Unknown node type: {node_type}") + + # Create and return the node options + return node_options_class( + name=full_name, + parameters=parameters, + visual=visual_options, + state=state_options, + noise=noise, + ) + + +def create_nodes( + node_specs: Iterable, + base_name: str, + transform_mat: np.ndarray, +) -> options.NodeOptions: + """Create node using create_method""" + nodes = {} + for ( + node_id, + node_type, + position_vec, + label, + color, + states, + parameters, + ) in node_specs: + nodes[node_id] = create_node( + base_name, + node_id, + node_type, + position_vec, + label, + color, + transform_mat, + states, + parameters, + ) + return nodes + + +def create_edges( + edge_specs: Iterable, + base_name: str, + visual_options: options.EdgeVisualOptions = options.EdgeVisualOptions(), +) -> options.EdgeOptions: + """Create edges from specs""" + edges = {} + for source_tuple, target_tuple, weight, edge_type in edge_specs: + source = join_str((base_name, *source_tuple)) + target = join_str((base_name, *target_tuple)) + edges[join_str((source, "to", target))] = options.EdgeOptions( + source=source, + target=target, + weight=weight, + type=edge_type, + visual=options.EdgeVisualOptions(**visual_options), + ) + return edges + + +class BrainStemDrive: + """ Generate Brainstem drive network """ + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + node_specs = [ + ( + join_str(("BS", "input")), + "Relay", + np.array((3.0, 0.0)), + "A", + [0.0, 0.0, 0.0], + {}, + {}, + ), + ( + join_str(("BS", "DR")), + "Linear", + np.array((3.0, -1.0)), + "A", + [0.0, 0.0, 0.0], + None, + {"slope": 1.0, "bias": 0.0}, + ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + + # Define edge details in a list for easier iteration + edge_specs = [ + (("BS", 
"input"), ("BS", "DR"), 1.0, "excitatory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +class RhythmGenerator: + """Generate RhythmGenerator Network""" + + def __init__(self, name="", transform_mat=np.identity(3)): + """Initialization.""" + super().__init__() + self.name = name + self.transform_mat = transform_mat + + def nodes(self): + """Add nodes.""" + node_specs = [ + ( + join_str(("RG", "F")), + "LINaPDanner", + np.array((3.0, 0.0)), + "F", + [1.0, 0.0, 0.0], + {"v": -62.5, "h": np.random.uniform(0, 1)}, + {}, + ), + ( + join_str(("RG", "E")), + "LINaPDanner", + np.array((-3.0, 0.0)), + "E", + [0.0, 1.0, 0.0], + {"v": -62.5, "h": np.random.uniform(0, 1)}, + {}, + ), + ( + join_str(("RG", "In", "F")), + "LIDanner", + np.array((1.0, -1.5)), + "In", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + ( + join_str(("RG", "In", "E")), + "LIDanner", + np.array((-1.0, 1.5)), + "In", + [0.2, 0.2, 0.2], + {"v": -60.0, "a": 0.0}, + {}, + ), + # ( + # join_str(("RG", "In", "E2")), + # "LIDanner", + # np.array((-5.0, 1.0)), + # "In", + # [0.2, 0.2, 0.2], + # {"v": -60.0, "a": 0.0}, + # {}, + # ), + # ( + # join_str(("RG", "F", "DR")), + # "Linear", + # np.array((3.0, 2.0)), + # "d", + # [0.5, 0.5, 0.5], # Default visual color if needed + # None, + # {"slope": 0.1, "bias": 0.0}, + # ), + # ( + # join_str(("RG", "E", "DR")), + # "Linear", + # np.array((-3.0, 2.0)), + # "d", + # [0.5, 0.5, 0.5], # Default visual color if needed + # None, + # {"slope": 0.0, "bias": 0.1}, + # ), + ] + + # Loop through the node specs to create each node using the create_node function + nodes = create_nodes(node_specs, self.name, self.transform_mat) + return nodes + + def edges(self): + """Add edges.""" + + # Define edge details in a list for easier iteration + edge_specs = [ + (("RG", "F"), ("RG", "In", "F"), 0.4, "excitatory"), + (("RG", "In", "F"), ("RG", "E"), -1.0, "inhibitory"), + (("RG", "E"), ("RG", "In", "E"), 0.4, "excitatory"), + (("RG", "In", "E"), ("RG", "F"), -0.08, "inhibitory"), + # (("RG", "In", "E2"), ("RG", "F"), -0.04, "inhibitory"), + # (("RG", "F", "DR"), ("RG", "F"), 1.0, "excitatory"), + # (("RG", "E", "DR"), ("RG", "E"), 1.0, "excitatory"), + ] + + # Loop through the edge specs to create each edge + edges = create_edges(edge_specs, self.name) + return edges + + +# class RhythmGenerator: +# """Generate RhythmGenerator Network""" + +# def __init__(self, name="", anchor_x=0.0, anchor_y=0.0): +# """Initialization.""" +# super().__init__() +# self.name = name + +# def nodes(self): +# """Add nodes.""" +# nodes = {} +# nodes["RG-F"] = options.LINaPDannerNodeOptions( +# name=self.name + "-RG-F", +# parameters=options.LINaPDannerNodeParameterOptions.defaults(), +# visual=options.NodeVisualOptions(label="F", color=[1.0, 0.0, 0.0]), +# state=options.LINaPDannerStateOptions.from_kwargs( +# v=-60.5, h=np.random.uniform(0, 1) +# ), +# noise=None +# ) + +# nodes["RG-E"] = options.LINaPDannerNodeOptions( +# name=self.name + "-RG-E", +# parameters=options.LINaPDannerNodeParameterOptions.defaults(), +# visual=options.NodeVisualOptions(label="E", color=[0.0, 1.0, 0.0]), +# state=options.LINaPDannerStateOptions.from_kwargs( +# v=-62.5, h=np.random.uniform(0, 1) +# ), +# noise=None +# ) + +# nodes["In-F"] = options.LIDannerNodeOptions( +# name=self.name + "-In-F", +# parameters=options.LIDannerNodeParameterOptions.defaults(), +# visual=options.NodeVisualOptions(label="In", color=[0.2, 0.2, 0.2]), +# 
state=options.LIDannerStateOptions.from_kwargs(v=-60.0, a=0.0), +# noise=None +# ) + +# nodes["In-E"] = options.LIDannerNodeOptions( +# name=self.name + "-In-E", +# parameters=options.LIDannerNodeParameterOptions.defaults(), +# visual=options.NodeVisualOptions(label="In", color=[0.2, 0.2, 0.2]), +# state=options.LIDannerStateOptions.from_kwargs(v=-60.0, a=0.0), +# noise=None +# ) +# return nodes + +# def edges(self): +# edges = {} +# edges["RG-F-to-In-F"] = options.EdgeOptions( +# source=self.name + "-RG-F", +# target=self.name + "-In-F", +# weight=0.4, +# type="excitatory", +# visual=options.EdgeVisualOptions(), +# ) +# edges["In-F-to-RG-E"] = options.EdgeOptions( +# source=self.name + "-In-F", +# target=self.name + "-RG-E", +# weight=-1.0, +# type="inhibitory", +# visual=options.EdgeVisualOptions(), +# ) +# edges["In-E-to-RG-F"] = options.EdgeOptions( +# source=self.name + "-In-E", +# target=self.name + "-RG-F", +# weight=-0.08, +# type="inhibitory", +# visual=options.EdgeVisualOptions(), +# ) +# edges["RG-E-to-In-E"] = options.EdgeOptions( +# source=self.name + "-RG-E", +# target=self.name + "-In-E", +# weight=0.4, +# type="excitatory", +# visual=options.EdgeVisualOptions(), +# ) +# return edges + + +def generate_network(iterations=5000): + """ Generate network """ + + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "rhythm_generator"}, + integration=options.IntegrationOptions.defaults( + n_iterations=iterations, + timestep=float(1.0), + ), + logs=options.NetworkLogOptions( + n_iterations=iterations, + buffer_size=iterations, + ) + ) + + # Generate rhythm center + rhythm = RhythmGenerator(name="") + network_options.add_nodes((rhythm.nodes()).values()) + network_options.add_edges((rhythm.edges()).values()) + + flexor_drive = options.LinearNodeOptions( + name="FD", + parameters=options.LinearParameterOptions.defaults(slope=0.1, bias=0.0), + visual=options.NodeVisualOptions(position=(1.0, 0.0)), + noise=None + ) + extensor_drive = options.LinearNodeOptions( + name="ED", + parameters=options.LinearParameterOptions.defaults(slope=0.0, bias=0.1), + visual=options.NodeVisualOptions(position=(1.0, 1.0)), + noise=None + ) + + drive = options.RelayNodeOptions( + name="D", + visual=options.NodeVisualOptions(position=(5.0, 5.0)), + parameters=None, + noise=None + ) + + network_options.add_node(flexor_drive) + network_options.add_node(extensor_drive) + network_options.add_node(drive) + + network_options.add_edge( + options.EdgeOptions( + source="FD", + target="RG_F", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.EdgeOptions( + source="ED", + target="RG_E", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.EdgeOptions( + source="ED", + target="RG_In_E", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.EdgeOptions( + source="D", + target="ED", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.EdgeOptions( + source="D", + target="FD", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + + + # network_options = options.NetworkOptions.from_options( + # read_yaml("/Users/tatarama/projects/work/farms/farms_network/examples/mouse/config/network_options.yaml") + # ) + graph = nx.node_link_graph( + network_options, + directed=True, + 
multigraph=False, + link="edges", + name="name", + source="source", + target="target" + ) + # node_positions = nx.spring_layout(graph) + # network_options.save("/tmp/rhythm.yaml") + # for index, node in enumerate(network_options.nodes): + # node.visual.position[:2] = node_positions[node.name] + + data = NetworkData.from_options(network_options) + + network = Network.from_options(network_options) + + # network.setup_integrator(network_options) + rk4solver = RK4Solver(network.nstates, network_options.integration.timestep) + + integrator = ode(network.get_ode_func()).set_integrator( + u'dopri5', + method=u'adams', + max_step=0.0, + # nsteps=0 + ) + nnodes = len(network_options.nodes) + integrator.set_initial_value(np.zeros(len(data.states.array[:]),), 0.0) + + # print("Data ------------", np.array(network.data.states.array)) + + # data.to_file("/tmp/sim.hdf5") + + # # Integrate + states = np.ones((iterations, len(data.states.array[:])))*1.0 + states_tmp = np.zeros((len(data.states.array[:],))) + outputs = np.ones((iterations, len(data.outputs.array[:])))*1.0 + # states[0, 2] = -1.0 + + # for index, node in enumerate(network_options.nodes): + # print(index, node.name) + # network.data.external_inputs.array[:] = np.ones((1,))*(iteration/iterations)*1.0 + inputs = np.ones(np.shape(network.data.external_inputs.array[:])) + # print(np.array(network.data.connectivity.weights), np.array(network.data.connectivity.edge_indices), np.array(network.data.connectivity.node_indices), np.array(network.data.connectivity.index_offsets)) + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + time = iteration + # network.step(network.ode, iteration*1e-3, network.data.states.array) + # network.step() + # states[iteration+1, :] = network.data.states.array + # network.step() + # network.evaluate(iteration*1e-3, states[iteration, :]) + + _iter = network._network_cy.iteration + network.data.times.array[_iter] = time + network.data.external_inputs.array[:] = inputs*0.5 + # integrator.set_initial_value(integrator.y, integrator.t) + # integrator.integrate(integrator.t+1.0) + # network.data.states.array[:] = integrator.y + rk4solver.step(network._network_cy, time, network.data.states.array) + # outputs[iteration, :] = network.data.outputs.array + # states[iteration, :] = integrator.y# network.data.states.array + # network._network_cy.update_iteration() + network._network_cy.iteration += 1 + network._network_cy.update_logs(network._network_cy.iteration) + + # network.data.to_file("/tmp/network.h5") + nodes_data = network.log.nodes + plt.figure() + plt.plot( + np.linspace(0.0, iterations*1e-3, iterations), states[:, :], + ) + plt.figure() + plt.fill_between( + np.linspace(0.0, iterations*1e-3, iterations), np.asarray(nodes_data[0].output.array), + alpha=0.2, lw=1.0, + ) + plt.plot( + np.linspace(0.0, iterations*1e-3, iterations), np.asarray(nodes_data[0].output.array), + label="RG-F" + ) + plt.fill_between( + np.linspace(0.0, iterations*1e-3, iterations), np.asarray(nodes_data[1].output.array), + alpha=0.2, lw=1.0, + ) + plt.plot( + np.linspace(0.0, iterations*1e-3, iterations), np.asarray(nodes_data[1].output.array), label="RG-E" + ) + plt.legend() + + plt.figure() + # node_positions = nx.circular_layout(graph) + # node_positions = nx.forceatlas2_layout(graph) + node_positions = {} + for index, node in enumerate(network_options.nodes): + node_positions[node.name] = node.visual.position[:2] + + _ = nx.draw_networkx_nodes( + graph, + pos=node_positions, + node_color=[data["visual"]["color"] for node, 
data in graph.nodes.items()], + alpha=0.25, + edgecolors='k', + linewidths=2.0, + ) + nx.draw_networkx_labels( + graph, + pos=node_positions, + labels={node: data["visual"]["label"] for node, data in graph.nodes.items()}, + font_size=11.0, + font_weight='bold', + font_family='sans-serif', + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos=node_positions, + edge_color=[ + [0.0, 1.0, 0.0] if data["type"] == "excitatory" else [1.0, 0.0, 0.0] + for edge, data in graph.edges.items() + ], + width=1., + arrowsize=10, + style='dashed', + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle="arc3,rad=-0.2", + ) + # plt.figure() + # sparse_array = nx.to_scipy_sparse_array(graph) + # sns.heatmap( + # sparse_array.todense(), cbar=False, square=True, + # linewidths=0.5, + # annot=True + # ) + plt.show() + + # generate_tikz_figure( + # graph, + # paths.get_project_data_path().joinpath("templates", "network",), + # "tikz-full-network.tex", + # paths.get_project_images_path().joinpath("quadruped_network.tex") + # ) + + +def main(): + """Main.""" + + # Generate the network + # profile.profile(generate_network) + generate_network() + + # Run the network + # run_network() + + +if __name__ == "__main__": + main() diff --git a/examples/righetti08/run.py b/examples/righetti08/run.py new file mode 100644 index 0000000..40552fa --- /dev/null +++ b/examples/righetti08/run.py @@ -0,0 +1,255 @@ +""" +Hopf Oscillator + +[1]L. Righetti and A. J. Ijspeert, “Pattern generators with sensory +feedback for the control of quadruped locomotion,” in 2008 IEEE +International Conference on Robotics and Automation, May 2008, +pp. 819–824. doi: 10.1109/ROBOT.2008.4543306. +""" + + +import farms_pylog as pylog +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +import seaborn as sns +from farms_core.utils import profile +from farms_network.core import options +from farms_network.core.data import NetworkData +from farms_network.core.network import Network +from tqdm import tqdm + +plt.rcParams['text.usetex'] = False + + +def join_strings(strings): + return "_".join(strings) + + +class RhythmDrive: + """ Generate Drive Network """ + + def __init__(self, name="", anchor_x=0.0, anchor_y=0.0): + """Initialization.""" + super().__init__() + self.name = name + + def nodes(self): + """Add nodes.""" + nodes = {} + name = join_strings((self.name, "RG", "F", "DR")) + nodes[name] = options.LinearNodeOptions( + name=name, + parameters=options.LinearParameterOptions.defaults(slope=0.1, bias=0.0), + visual=options.NodeVisualOptions(), + ) + name = join_strings((self.name, "RG", "E", "DR")) + nodes[name] = options.LinearNodeOptions( + name=name, + parameters=options.LinearParameterOptions.defaults(slope=0.0, bias=0.1), + visual=options.NodeVisualOptions(), + ) + + return nodes + + +def generate_network(iterations=20000): + """ Generate network """ + + # Main network + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "rhigetti08"}, + integration=options.IntegrationOptions.defaults( + n_iterations=iterations, + timestep=float(1e-3), + ), + logs=options.NetworkLogOptions( + n_iterations=iterations, + buffer_size=iterations, + ) + ) + + # Generate rhythm centers + n_oscillators = 4 + + # Neuron + # Create an oscillator for each joint + num_oscillators = 4 + oscillator_names = [f'n{num}' for num in range(num_oscillators)] + for j, neuron_name in enumerate(oscillator_names): + network_options.add_node( + options.HopfOscillatorNodeOptions( + 
name=neuron_name, + parameters=options.HopfOscillatorNodeParameterOptions.defaults( + mu=1.0, + omega=5.0, + alpha=5.0, + beta=5.0, + ), + visual=options.NodeVisualOptions( + label=f"{j}", color=[1.0, 0.0, 0.0] + ), + state=options.HopfOscillatorStateOptions.from_kwargs( + x=np.random.uniform(0.0, 1.0), + y=np.random.uniform(0.0, 1.0), + ), + noise=None, + ) + ) + + # Connect edges + connection_matrix_walk = np.asarray( + [ + [0, -1, 1, -1], + [-1, 0, -1, 1], + [-1, 1, 0, -1], + [1, -1, -1, 0] + ] + ).T + + connection_matrix_trot = np.asarray( + [ + [0, -1, -1, 1], + [-1, 0, 1, -1], + [-1, 1, 0, -1], + [1, -1, -1, 0] + ] + ).T + + for i, j in zip(*np.nonzero(connection_matrix_trot)): + network_options.add_edge( + options.EdgeOptions( + source=oscillator_names[i], + target=oscillator_names[j], + weight=connection_matrix_trot[i, j]*1, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + + data = NetworkData.from_options(network_options) + + network = Network.from_options(network_options) + network.setup_integrator(network_options) + + # nnodes = len(network_options.nodes) + # integrator.set_initial_value(np.zeros(len(data.states.array),), 0.0) + + # print("Data ------------", np.array(network.data.states.array)) + + # data.to_file("/tmp/sim.hdf5") + + # integrator.integrate(integrator.t + 1e-3) + + # # Integrate + states = np.ones((iterations+1, len(data.states.array)))*0.0 + outputs = np.ones((iterations, len(data.outputs.array)))*1.0 + # states[0, 2] = -1.0 + + # for index, node in enumerate(network_options.nodes): + # print(index, node.name) + # network.data.external_inputs.array[:] = np.ones((1,))*(iteration/iterations)*1.0 + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + # network.step(network.ode, iteration*1e-3, network.data.states.array) + # network.step() + # states[iteration+1, :] = network.data.states.array + network.step() + network.data.times.array[iteration] = iteration*1e-3 + + # network.data.to_file("/tmp/network.h5") + plt.figure() + for j in range(n_oscillators): + plt.fill_between( + np.array(network.data.times.array), + 2*j + (1 + np.sin(network.data.nodes[j].output.array)), + 2*j, + alpha=0.2, + lw=1.0, + ) + plt.plot( + np.array(network.data.times.array), + 2*j + (1 + np.sin(network.data.nodes[j].output.array)), + label=f"{j}" + ) + plt.legend() + + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + link="edges", + name="name", + source="source", + target="target" + ) + plt.figure() + node_positions = nx.circular_layout(graph) + node_positions = nx.spring_layout(graph) + for index, node in enumerate(network_options.nodes): + node.visual.position[:2] = node_positions[node.name] + + network_options.save("/tmp/network_options.yaml") + + _ = nx.draw_networkx_nodes( + graph, + pos=node_positions, + node_color=[data["visual"]["color"] for node, data in graph.nodes.items()], + alpha=0.25, + edgecolors='k', + linewidths=2.0, + ) + nx.draw_networkx_labels( + graph, + pos=node_positions, + labels={node: data["visual"]["label"] for node, data in graph.nodes.items()}, + font_size=11.0, + font_weight='bold', + font_family='sans-serif', + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos=node_positions, + edge_color=[ + [0.0, 1.0, 0.0] if data["type"] == "excitatory" else [1.0, 0.0, 0.0] + for edge, data in graph.edges.items() + ], + width=1., + arrowsize=10, + style='dashed', + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle="arc3,rad=-0.2", + ) + plt.figure() + 
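# The heatmap below shows the signed adjacency actually stored in the graph,
# which should match connection_matrix_trot up to node ordering.
# Optional sanity check (illustrative sketch, assuming the second half of the
# run has converged to the trot pattern and that the logged node outputs are
# the oscillator phases, as the plotting above assumes): the positively
# coupled diagonal pairs (0, 3) and (1, 2) should be in phase, so the
# correlation of their phase signals is close to +1, while a cross pair such
# as (0, 1) is close to -1.
half = iterations // 2
phase_signals = np.stack([
    np.sin(np.asarray(network.data.nodes[j].output.array)[half:])
    for j in range(n_oscillators)
])
print("corr(0, 3):", np.corrcoef(phase_signals[0], phase_signals[3])[0, 1])
print("corr(0, 1):", np.corrcoef(phase_signals[0], phase_signals[1])[0, 1])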
sparse_array = nx.to_scipy_sparse_array(graph) + sns.heatmap( + sparse_array.todense(), cbar=False, square=True, + linewidths=0.5, + annot=True + ) + plt.show() + + # generate_tikz_figure( + # graph, + # paths.get_project_data_path().joinpath("templates", "network",), + # "tikz-full-network.tex", + # paths.get_project_images_path().joinpath("quadruped_network.tex") + # ) + + +def main(): + """Main.""" + + # Generate the network + profile.profile(generate_network) + + # Run the network + # run_network() + + +if __name__ == "__main__": + main() diff --git a/examples/zhang22/run.py b/examples/zhang22/run.py index 939b795..3cdbf6b 100644 --- a/examples/zhang22/run.py +++ b/examples/zhang22/run.py @@ -2,76 +2,100 @@ https://doi.org/10.7554/eLife.73424 paper network """ -import networkx as nx -import numpy as np -import farms_pylog as pylog import os +from copy import deepcopy +from pprint import pprint - -class RhythmGenerator(nx.DiGraph): +import farms_pylog as pylog +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +from farms_core.io.yaml import read_yaml, write_yaml +from farms_core.options import Options +from farms_network.core import options +from farms_network.core.data import (NetworkConnectivity, NetworkData, + NetworkStates) +from farms_network.core.network import PyNetwork, rk4 +from farms_network.core.options import NetworkOptions +from scipy.integrate import ode +from tqdm import tqdm + + +class RhythmGenerator: """Generate RhythmGenerator Network""" def __init__(self, name="", anchor_x=0.0, anchor_y=0.0): """Initialization.""" - super().__init__(name=name) - - @classmethod - def generate_nodes_edges(cls, name, anchor_x=0.0, anchor_y=0.0): - obj = cls(name, anchor_x, anchor_y) - obj.add_neurons(anchor_x, anchor_y) - obj.add_connections() - return obj - - def add_neurons(self, anchor_x, anchor_y): - """Add neurons.""" - self.add_node( - self.name + "-RG-F", - label="F", - model="lif_danner_nap", - x=1.0 + anchor_x, - y=0.0 + anchor_y, - color="r", - m_e=0.1, - b_e=0.0, - v0=-62.5, - h0=np.random.uniform(0, 1), - ) - self.add_node( - self.name + "-RG-E", - label="E", - model="lif_danner_nap", - x=1.0 + anchor_x, - y=4.0 + anchor_y, - color="b", - m_e=0.0, - b_e=0.1, - v0=-62.5, - h0=np.random.uniform(0, 1), - ) - self.add_node( - self.name + "-In-F", - label="In", - model="lif_danner", - x=0.0 + anchor_x, - y=2.0 + anchor_y, - color="m", - v0=-60.0, - ) - self.add_node( - self.name + "-In-E", - label="In", - model="lif_danner", - x=2.0 + anchor_x, - y=2.0 + anchor_y, - color="m", - v0=-60.0, - ) - - def add_connections(self): - self.add_edge(self.name + "-RG-F", self.name + "-In-F", weight=0.4) - self.add_edge(self.name + "-In-F", self.name + "-RG-E", weight=-1.0) - self.add_edge(self.name + "-RG-E", self.name + "-In-E", weight=0.4) - self.add_edge(self.name + "-In-E", self.name + "-RG-F", weight=-0.08) + super().__init__() + self.name = name + + def nodes(self): + """Add nodes.""" + nodes = {} + nodes["RG-F"] = options.LINaPDannerNodeOptions( + name=self.name + "-RG-F", + parameters=options.LINaPDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="F", color=[1.0, 0.0, 0.0]), + state=options.LINaPDannerStateOptions.from_kwargs( + v0=-62.5, h0=np.random.uniform(0, 1) + ), + ) + + nodes["RG-E"] = options.LINaPDannerNodeOptions( + name=self.name + "-RG-E", + parameters=options.LINaPDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="E", color=[0.0, 1.0, 0.0]), + state=options.LINaPDannerStateOptions.from_kwargs( 
+ v0=-62.5, h0=np.random.uniform(0, 1) + ), + ) + + nodes["In-F"] = options.LIDannerNodeOptions( + name=self.name + "-In-F", + parameters=options.LIDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="In", color=[0.2, 0.2, 0.2]), + state=options.LIDannerStateOptions.from_kwargs(v0=-60.0,), + ) + + nodes["In-E"] = options.LIDannerNodeOptions( + name=self.name + "-In-E", + parameters=options.LIDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="In", color=[0.2, 0.2, 0.2]), + state=options.LIDannerStateOptions.from_kwargs(v0=-60.0,), + ) + return nodes + + def edges(self): + edges = {} + edges["RG-F-to-In-F"] = options.EdgeOptions( + source=self.name + "-RG-F", + target=self.name + "-In-F", + weight=0.4, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + edges["In-F-to-RG-E"] = options.EdgeOptions( + source=self.name + "-In-F", + target=self.name + "-RG-E", + weight=-1.0, + type="inhibitory", + visual=options.EdgeVisualOptions(), + ) + edges["RG-E-to-In-E"] = options.EdgeOptions( + source=self.name + "-RG-E", + target=self.name + "-In-E", + weight=0.4, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + edges["In-E-to-RG-F"] = options.EdgeOptions( + source=self.name + "-In-E", + target=self.name + "-RG-F", + weight=-0.08, + type="inhibitory", + visual=options.EdgeVisualOptions(), + ) + return edges class Commissural(nx.DiGraph): @@ -82,27 +106,24 @@ def __init__(self, name="", anchor_x=0.0, anchor_y=0.0): """Initialization.""" super().__init__(name=name) - @classmethod - def generate_nodes_edges(cls, name, anchor_x=0.0, anchor_y=0.0): - obj = cls(name, anchor_x, anchor_y) - obj.add_neurons(anchor_x, anchor_y) - obj.add_connections() - return obj - - def add_neurons(self, anchor_x, anchor_y,): - """Add neurons.""" - + def nodes(self): + """Add nodes.""" + nodes = {} # V3 - # for side in ("LEFT", "RIGHT"): - # for leg in ("FORE", "HIND"): - self.add_node( - self.name + "-V3-E-Left-Fore", - label="V3-E", - model="lif_danner", - x=-1.0 + anchor_x, - y=8.0 + anchor_y, - color="g", - v0=-60.0, + node[] = options.LINaPDannerNodeOptions( + name=self.name + "-RG-F", + parameters=options.LINaPDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="F", color=[1.0, 0.0, 0.0]), + state=options.LINaPDannerStateOptions.from_kwargs( + v0=-62.5, h0=np.random.uniform(0, 1) + ), + ) + + nodes[self.name + "-V3-E-Left-Fore"] = options.LIDannerNodeOptions( + name=self.name + "-V3-E-Left-Fore", + parameters=options.LIDannerParameterOptions.defaults(), + visual=options.NodeVisualOptions(label="In", color=[0.2, 0.2, 0.2]), + state=options.LIDannerStateOptions.from_kwargs(v0=-60.0,), ) self.add_node( @@ -419,64 +440,143 @@ def generate_network(): """ Generate network """ # Main network - network = nx.DiGraph() + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "zhang2022"}, + ) # Generate rhythm centers - rhythm1 = RhythmGenerator.generate_nodes_edges(name="LEFT-FORE", anchor_x=-5.0, anchor_y=4.0) - rhythm2 = RhythmGenerator.generate_nodes_edges(name="LEFT-HIND", anchor_x=-5.0, anchor_y=-6.0) - rhythm3 = RhythmGenerator.generate_nodes_edges(name="RIGHT-FORE", anchor_x=5.0, anchor_y=4.0) - rhythm4 = RhythmGenerator.generate_nodes_edges(name="RIGHT-HIND", anchor_x=5.0, anchor_y=-6.0) - - # Commissural - commissural = Commissural.generate_nodes_edges(name="commissural") - - # LSPN - lpsn = LPSN.generate_nodes_edges(name="lspn") - - network = nx.compose_all( - [rhythm1, rhythm2, 
rhythm3, rhythm4, commissural, lpsn] + for side in ("LEFT", "RIGHT"): + for limb in ("FORE", "HIND"): + rhythm = RhythmGenerator(name=f"{side}-{limb}") + network_options.add_nodes((rhythm.nodes()).values()) + network_options.add_edges((rhythm.edges()).values()) + + flexor_drive = options.LinearNodeOptions( + name="FD", + parameters=options.LinearParameterOptions.defaults(slope=0.1, bias=0.0), + visual=options.NodeVisualOptions(), ) - - colors = { - "RG-F": "fill=flex!40", - "RG-E": "fill=ext!40", - "In-E": "fill=inf!40", - "In-F": "fill=inf!40", - "V2a-diag": "fill=red!40", - "commissural-CINi1": "fill=green!40", - "commissural-V0V": "fill=red!40", - "commissural-V0D": "fill=green!40", - "commissural-V3": "fill=red!40", - "commissural-V2a": "fill=green!40", - "commissural-IniV0V": "fill=red!40", - } - - node_options = { - name: colors.get("-".join(name.split("-")[-2:]), "fill=yellow!40") - for name, node in network.nodes.items() - } - - nx.write_graphml(network, "./config/auto_zhang_2022.graphml") - nx.write_latex( - network, - "zhang2022_figure.tex", - pos={name: (node['x'], node['y']) for name, node in network.nodes.items()}, - as_document=True, - caption="A path graph", - latex_label="fig1", - node_options=node_options, - default_node_options="my-node", - node_label={ - name: node["label"] - for name, node in network.nodes.items() - }, - default_edge_options="[color=black, thick, -{Latex[scale=1.0]}, bend left, looseness=0.75]", - document_wrapper=_DOC_WRAPPER_TIKZ, + extensor_drive = options.LinearNodeOptions( + name="ED", + parameters=options.LinearParameterOptions.defaults(slope=0.0, bias=0.1), + visual=options.NodeVisualOptions(), ) - - latex_code = nx.to_latex(network) # a string rather than a file - - os.system("pdflatex zhang2022_figure.tex") + network_options.add_node(flexor_drive) + network_options.add_node(extensor_drive) + for side in ("LEFT", "RIGHT"): + for limb in ("FORE", "HIND"): + network_options.add_edge( + options.EdgeOptions( + source="FD", + target=f"{side}-{limb}-RG-F", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + network_options.add_edge( + options.EdgeOptions( + source="ED", + target=f"{side}-{limb}-RG-E", + weight=1.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + ) + ) + + data = NetworkData.from_options(network_options) + + network = PyNetwork.from_options(network_options) + + # nnodes = len(network_options.nodes) + # integrator.set_initial_value(np.zeros(len(data.states.array),), 0.0) + + # print("Data ------------", np.array(network.data.states.array)) + + # data.to_file("/tmp/sim.hdf5") + + # integrator.integrate(integrator.t + 1e-3) + + # # Integrate + iterations = 10000 + states = np.ones((iterations+1, len(data.states.array)))*1.0 + outputs = np.ones((iterations, len(data.outputs.array)))*1.0 + # states[0, 2] = -1.0 + + for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + network.data.external_inputs.array[:] = np.ones((1,))*(iteration/iterations)*1.0 + states[iteration+1, :] = rk4(iteration*1e-3, states[iteration, :], network.ode, step_size=1) + outputs[iteration, :] = network.data.outputs.array + + plt.figure() + plt.fill_between( + np.linspace(0.0, iterations*1e-3, iterations), outputs[:, 0], + alpha=0.2, lw=1.0, + ) + plt.plot( + np.linspace(0.0, iterations*1e-3, iterations), outputs[:, 0], + label="RG-F" + ) + plt.fill_between( + np.linspace(0.0, iterations*1e-3, iterations), outputs[:, 1], + alpha=0.2, lw=1.0, + ) + plt.plot(np.linspace(0.0, iterations*1e-3, iterations), 
outputs[:, 1], label="RG-E") + plt.legend() + + network_options.save("/tmp/netwok_options.yaml") + + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + link="edges", + name="name", + source="source", + target="target" + ) + plt.figure() + nx.draw( + graph, pos=nx.nx_agraph.graphviz_layout(graph), + node_shape="o", + connectionstyle="arc3,rad=-0.2", + with_labels=True, + ) + plt.show() + + # # Commissural + # commissural = Commissural.generate_nodes_edges(name="commissural") + + # # LSPN + # lpsn = LPSN.generate_nodes_edges(name="lspn") + + # network = nx.compose_all( + # [rhythm1, rhythm2, rhythm3, rhythm4, commissural, lpsn] + # ) + + # nx.write_graphml(network, "./config/auto_zhang_2022.graphml") + # nx.write_latex( + # network, + # "zhang2022_figure.tex", + # pos={name: (node['x'], node['y']) for name, node in network.nodes.items()}, + # as_document=True, + # caption="A path graph", + # latex_label="fig1", + # node_options=node_options, + # default_node_options="my-node", + # node_label={ + # name: node["label"] + # for name, node in network.nodes.items() + # }, + # default_edge_options="[color=black, thick, -{Latex[scale=1.0]}, bend left, looseness=0.75]", + # document_wrapper=_DOC_WRAPPER_TIKZ, + # ) + + # latex_code = nx.to_latex(network) # a string rather than a file + + # os.system("pdflatex zhang2022_figure.tex") def main(): diff --git a/farms_network/data/__init__.py b/farms_network/analysis/__init__.py similarity index 100% rename from farms_network/data/__init__.py rename to farms_network/analysis/__init__.py diff --git a/farms_network/analysis/plot.py b/farms_network/analysis/plot.py new file mode 100644 index 0000000..ef4e2df --- /dev/null +++ b/farms_network/analysis/plot.py @@ -0,0 +1,77 @@ +import networkx as nx +from farms_core.analysis import plot + + +def visualize(network_options): + """ Visualize network """ + + graph = nx.node_link_graph( + network_options, + directed=True, + multigraph=False, + link="edges", + name="name", + source="source", + target="target", + ) + + plt.figure() + pos_circular = nx.circular_layout(graph) + pos_spring = nx.spring_layout(graph) + pos_graphviz = nx.nx_agraph.pygraphviz_layout(graph) + + _ = nx.draw_networkx_nodes( + graph, + pos={ + node: data["visual"]["position"][:2] + for node, data in graph.nodes.items() + }, + node_color=[ + data["visual"]["color"] + for node, data in graph.nodes.items() + ], + alpha=0.25, + edgecolors="k", + linewidths=2.0, + node_size=[ + 300*data["visual"]["radius"] + for node, data in graph.nodes.items() + ], + ) + nx.draw_networkx_labels( + graph, + pos={ + node: data["visual"]["position"][:2] + for node, data in graph.nodes.items() + }, + labels={ + node: data["visual"]["label"] + for node, data in graph.nodes.items() + }, + font_size=11.0, + font_weight="bold", + font_family="sans-serif", + alpha=1.0, + ) + nx.draw_networkx_edges( + graph, + pos={ + node: data["visual"]["position"][:2] + for node, data in graph.nodes.items() + }, + edge_color=[ + [0.3, 1.0, 0.3] if data["type"] == "excitatory" else [0.7, 0.3, 0.3] + for edge, data in graph.edges.items() + ], + width=1.0, + arrowsize=10, + style="-", + arrows=True, + min_source_margin=5, + min_target_margin=5, + connectionstyle=[ + data["visual"]["connectionstyle"] + for edge, data in graph.edges.items() + ], + ) + plt.show() diff --git a/farms_network/data/data_cy.pyx b/farms_network/core/__init__.py similarity index 98% rename from farms_network/data/data_cy.pyx rename to farms_network/core/__init__.py index 
4b3700e..f411717 100644 --- a/farms_network/data/data_cy.pyx +++ b/farms_network/core/__init__.py @@ -1,5 +1,5 @@ """ ------------------------------------------------------------------------ +---------------------------------------------------------------------- Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne @@ -16,4 +16,3 @@ limitations under the License. ----------------------------------------------------------------------- """ - diff --git a/farms_network/core/data.py b/farms_network/core/data.py new file mode 100644 index 0000000..a2c7cff --- /dev/null +++ b/farms_network/core/data.py @@ -0,0 +1,544 @@ +""" + +Main data structure for the network + +""" + +from pprint import pprint +from typing import Dict, Iterable, List + +import numpy as np +from farms_core import pylog +from farms_core.array.array import to_array +from farms_core.array.array_cy import (DoubleArray1D, DoubleArray2D, + IntegerArray1D) +from farms_core.array.types import (NDARRAY_V1, NDARRAY_V1_D, NDARRAY_V2_D, + NDARRAY_V3_D) +from farms_core.io.hdf5 import dict_to_hdf5, hdf5_to_dict + +from .data_cy import (NetworkConnectivityCy, NetworkDataCy, NetworkLogCy, NetworkNoiseCy, + NetworkStatesCy, NetworkLogStatesCy) +from .options import NetworkOptions, NodeOptions, NodeStateOptions + + +NPDTYPE = np.float64 +NPUITYPE = np.uintc + + +class NetworkData(NetworkDataCy): + """ Network data """ + + def __init__( + self, + states, + derivatives, + connectivity, + outputs, + tmp_outputs, + external_inputs, + noise, + nodes, + **kwargs, + ): + """ Network data structure """ + + super().__init__() + + self.states = states + self.derivatives = derivatives + self.connectivity = connectivity + self.outputs = outputs + self.tmp_outputs = tmp_outputs + self.external_inputs = external_inputs + self.noise = noise + + self.nodes: List[NodeData] = nodes + + # assert that the data created is c-contiguous + assert self.states.array.is_c_contig() + assert self.derivatives.array.is_c_contig() + assert self.outputs.array.is_c_contig() + assert self.tmp_outputs.array.is_c_contig() + assert self.external_inputs.array.is_c_contig() + + @classmethod + def from_options(cls, network_options: NetworkOptions): + """ From options """ + + states = NetworkStates.from_options(network_options) + derivatives = NetworkStates.from_options(network_options) + connectivity = NetworkConnectivity.from_options(network_options) + + outputs = DoubleArray1D( + array=np.full( + shape=(len(network_options.nodes),), + fill_value=0, + dtype=NPDTYPE, + ) + ) + + tmp_outputs = DoubleArray1D( + array=np.full( + shape=(len(network_options.nodes),), + fill_value=0, + dtype=NPDTYPE, + ) + ) + + external_inputs = DoubleArray1D( + array=np.full( + shape=len(network_options.nodes), + fill_value=0, + dtype=NPDTYPE, + ) + ) + nodes = Nodes(network_options, states, outputs, external_inputs) + + noise = NetworkNoise.from_options(network_options) + + return cls( + states=states, + derivatives=derivatives, + connectivity=connectivity, + outputs=outputs, + tmp_outputs=tmp_outputs, + external_inputs=external_inputs, + noise=noise, + nodes=nodes, + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'times': to_array(self.times.array), + 'states': self.states.to_dict(), + 'derivatives': self.derivatives.to_dict(), + 'connectivity': self.connectivity.to_dict(), + 'outputs': to_array(self.outputs.array), + 'tmp_outputs': 
to_array(self.tmp_outputs.array), + 'external_inputs': to_array(self.external_inputs.array), + 'noise': self.noise.to_dict(), + 'nodes': {node.name: node.to_dict() for node in self.nodes}, + } + + def to_file(self, filename: str, iteration: int = None): + """Save data to file""" + pylog.info('Exporting to dictionary') + data_dict = self.to_dict(iteration) + pylog.info('Saving data to %s', filename) + dict_to_hdf5(filename=filename, data=data_dict) + pylog.info('Saved data to %s', filename) + + +class NetworkStates(NetworkStatesCy): + + def __init__(self, array, indices): + super().__init__(array, indices) + + @classmethod + def from_options(cls, network_options: NetworkOptions): + + nodes = network_options.nodes + nstates = 0 + indices = [0,] + for index, node in enumerate(nodes): + nstates += node._nstates + indices.append(nstates) + return cls( + array=np.array(np.zeros((nstates,)), dtype=NPDTYPE), + indices=np.array(indices) + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'array': to_array(self.array), + 'indices': to_array(self.indices), + } + + +class NetworkConnectivity(NetworkConnectivityCy): + + def __init__(self, node_indices, edge_indices, weights, index_offsets): + super().__init__(node_indices, edge_indices, weights, index_offsets) + + @classmethod + def from_options(cls, network_options: NetworkOptions): + + nodes = network_options.nodes + edges = network_options.edges + + connectivity = np.full( + shape=(len(edges), 4), + fill_value=0, + dtype=NPDTYPE, + ) + node_names = [node.name for node in nodes] + + for index, edge in enumerate(edges): + connectivity[index][0] = int(node_names.index(edge.source)) + connectivity[index][1] = int(node_names.index(edge.target)) + connectivity[index][2] = edge.weight + connectivity[index][3] = index + connectivity = np.array(sorted(connectivity, key=lambda col: col[1])) + + node_indices = np.full( + shape=len(edges), + fill_value=0, + dtype=NPDTYPE, + ) + weights = np.full( + shape=len(edges), + fill_value=0, + dtype=NPDTYPE, + ) + edge_indices = np.full( + shape=len(edges), + fill_value=0, + dtype=NPDTYPE, + ) + nedges = 0 + index_offsets = [] + if len(edges) > 0: + index_offsets.append(0) + for index, node in enumerate(nodes): + _node_indices = connectivity[connectivity[:, 1] == index][:, 0].tolist() + _weights = connectivity[connectivity[:, 1] == index][:, 2].tolist() + _edge_indices = connectivity[connectivity[:, 1] == index][:, 3].tolist() + nedges += len(_node_indices) + index_offsets.append(nedges) + node_indices[index_offsets[index]:index_offsets[index+1]] = _node_indices + edge_indices[index_offsets[index]:index_offsets[index+1]] = _edge_indices + weights[index_offsets[index]:index_offsets[index+1]] = _weights + return cls( + node_indices=np.array(node_indices, dtype=NPUITYPE), + edge_indices=np.array(edge_indices, dtype=NPUITYPE), + weights=np.array(weights, dtype=NPDTYPE), + index_offsets=np.array(index_offsets, dtype=NPUITYPE) + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'node_indices': to_array(self.node_indices), + 'edge_indices': to_array(self.edge_indices), + 'weights': to_array(self.weights), + 'index_offsets': to_array(self.index_offsets), + } + + +class NetworkNoise(NetworkNoiseCy): + """ Data for network noise modeling """ + + def __init__(self, states, indices, drift, diffusion, outputs): + super().__init__(states, indices, drift, diffusion, outputs) + + @classmethod + def from_options(cls, 
network_options: NetworkOptions): + + nodes = network_options.nodes + n_noise_states = 0 + n_nodes = len(nodes) + + indices = [] + for index, node in enumerate(nodes): + if node.noise: + if node.noise.is_stochastic: + n_noise_states += 1 + indices.append(index) + + return cls( + states=np.full( + shape=n_noise_states, + fill_value=0.0, + dtype=NPDTYPE, + ), + drift=np.full( + shape=n_noise_states, + fill_value=0.0, + dtype=NPDTYPE, + ), + diffusion=np.full( + shape=n_noise_states, + fill_value=0.0, + dtype=NPDTYPE, + ), + indices=np.array( + indices, + dtype=NPUITYPE, + ), + outputs=np.full( + shape=n_nodes, + fill_value=0.0, + dtype=NPDTYPE, + ) + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'states': to_array(self.states), + 'indices': to_array(self.indices), + 'drift': to_array(self.drift), + 'diffusion': to_array(self.diffusion), + 'outputs': to_array(self.outputs), + } + + +class NetworkLogStates(NetworkLogStatesCy): + + def __init__(self, array, indices): + super().__init__(array, indices) + + @classmethod + def from_options(cls, network_options: NetworkOptions): + + nodes = network_options.nodes + nstates = 0 + indices = [0,] + buffer_size = network_options.logs.buffer_size + for index, node in enumerate(nodes): + nstates += node._nstates + indices.append(nstates) + return cls( + array=np.array(np.zeros((buffer_size, nstates)), dtype=NPDTYPE), + indices=np.array(indices) + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'array': to_array(self.array), + 'indices': to_array(self.indices), + } + + + +class NetworkLog(NetworkLogCy): + """ Network Logs """ + + def __init__( + self, + times, + states, + connectivity, + outputs, + external_inputs, + noise, + nodes, + **kwargs, + ): + """ Network data structure """ + + super().__init__() + + self.times = times + self.states = states + self.connectivity = connectivity + self.outputs = outputs + self.external_inputs = external_inputs + self.noise = noise + + self.nodes: Nodes = nodes + + # assert that the data created is c-contiguous + assert self.states.array.is_c_contig() + assert self.outputs.array.is_c_contig() + assert self.external_inputs.array.is_c_contig() + + @classmethod + def from_options(cls, network_options: NetworkOptions): + """ From options """ + + buffer_size = network_options.logs.buffer_size + + times = DoubleArray1D( + array=np.full( + shape=buffer_size, + fill_value=0, + dtype=NPDTYPE, + ) + ) + states = NetworkLogStates.from_options(network_options) + + connectivity = NetworkConnectivity.from_options(network_options) + + noise = NetworkNoise.from_options(network_options) + + outputs = DoubleArray2D( + array=np.full( + shape=(buffer_size, len(network_options.nodes)), + fill_value=0, + dtype=NPDTYPE, + ) + ) + + external_inputs = DoubleArray2D( + array=np.full( + shape=(buffer_size, len(network_options.nodes)), + fill_value=0, + dtype=NPDTYPE, + ) + ) + + nodes = Nodes(network_options, states, outputs, external_inputs) + + return cls( + times=times, + states=states, + connectivity=connectivity, + outputs=outputs, + external_inputs=external_inputs, + noise=noise, + nodes=nodes, + ) + + def to_dict(self, iteration: int = None) -> Dict: + """Convert data to dictionary""" + return { + 'times': to_array(self.times.array), + 'states': self.states.to_dict(), + 'connectivity': self.connectivity.to_dict(), + 'outputs': to_array(self.outputs.array), + 'external_inputs': to_array(self.external_inputs.array), + 'noise': 
self.noise.to_dict(), + 'nodes': {node.name: node.to_dict() for node in self.nodes}, + } + + def to_file(self, filename: str, iteration: int = None): + """Save data to file""" + pylog.info('Exporting to dictionary') + data_dict = self.to_dict(iteration) + pylog.info('Saving data to %s', filename) + dict_to_hdf5(filename=filename, data=data_dict) + pylog.info('Saved data to %s', filename) + + +class Nodes: + """ Nodes """ + + def __init__(self, network_options: NetworkOptions, states, outputs, external_inputs): + self._nodes = [] + self._name_to_index = {} + + for idx, node_opt in enumerate(network_options.nodes): + node = NodeData( + node_opt.name, + NodeStates(states, idx, node_opt.name), + NodeOutput(outputs, idx, node_opt.name), + NodeExternalInput(external_inputs, idx, node_opt.name), + ) + self._nodes.append(node) + self._name_to_index[node_opt.name] = idx + + def __getitem__(self, key: str): + # Access by index + if isinstance(key, int): + return self._nodes[key] + # Access by name + return self._nodes[self._name_to_index[key]] + + def __len__(self): + return len(self._nodes) + + def __iter__(self): + return iter(self._nodes) + + def names(self): + return list(self._name_to_index.keys()) + + +class NodeStates: + def __init__(self, network_states, node_index: int, node_name: str): + self.node_name = node_name + self._network_states = network_states + self._node_index = node_index + self.ndim = self._network_states.array.ndim + start = self._network_states.indices[self._node_index] + end = self._network_states.indices[self._node_index + 1] + if start == end: + self._has_states = False + else: + self._start_idx = start + self._end_idx = end + self._has_states = True + + @property + def values(self): + if not self._has_states: + raise ValueError(f"Node {self.node_name} has no states") + + if self.ndim == 1: + return self._network_states.array[self._start_idx:self._end_idx] + return self._network_states.array[:, self._start_idx:self._end_idx] + + @values.setter + def values(self, v: np.ndarray): + if not self._has_states: + raise ValueError(f"Node {self.node_name} has no states to be set") + assert v.dtype == np.float_, "Values must be of type double/float" + + if self.ndim == 1: + self._network_states.array[self._start_idx:self._end_idx] = v[:] + return + + raise AttributeError("Cannot assign to values in logging mode.") + + +class NodeOutput: + def __init__(self, network_outputs, node_index: str, node_name: str): + self.node_name = node_name + self._network_outputs = network_outputs + self.ndim = self._network_outputs.array.ndim + self._node_index = node_index + + @property + def values(self): + if self.ndim == 1: + return self._network_outputs.array[self._node_index] + return self._network_outputs.array[:, self._node_index] + + @values.setter + def values(self, v: float): + + if self.ndim == 1: + self._network_outputs.array[self._node_index] = v + return + raise AttributeError("Cannot assign to values in logging mode.") + + +class NodeExternalInput: + def __init__(self, network_external_inputs, node_index: int, node_name: str): + self.node_name = node_name + self._network_external_inputs = network_external_inputs + self.ndim = self._network_external_inputs.array.ndim + self._node_index = node_index + + @property + def values(self): + if self.ndim == 1: + return self._network_external_inputs.array[self._node_index] + return self._network_external_inputs.array[:, self._node_index] + + @values.setter + def values(self, v: float): + if self.ndim == 1: + 
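A hedged usage sketch for the Nodes container and the per-node accessor views defined here, assuming farms_network and farms_core are installed; the NetworkOptions instance and the node name "flexor" are placeholders, not part of this patch.

from farms_network.core.data import NetworkData
from farms_network.core.options import NetworkOptions

opts: NetworkOptions = ...          # assumed to be built elsewhere
data = NetworkData.from_options(opts)

node = data.nodes["flexor"]         # hypothetical node name; integer indices work as well
print(node.output.values)           # scalar view into the shared outputs array
node.external_input.values = 0.1    # writes through to data.external_inputs
print(data.nodes.names())           # all node names registered in the container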
self._network_external_inputs.array[self._node_index] = v + return + raise AttributeError("Cannot assign to values in logging mode.") + + +class NodeData: + """ Accesssor for Node Data """ + def __init__( + self, + name: str, + states: "NodeStates", + output: "NodeOutput", + external_input: "NodeExternalInput", + ): + super().__init__() + self.name = name + self.states = states + self.output = output + self.external_input = external_input diff --git a/farms_network/core/data_cy.pxd b/farms_network/core/data_cy.pxd new file mode 100644 index 0000000..1ae87c0 --- /dev/null +++ b/farms_network/core/data_cy.pxd @@ -0,0 +1,89 @@ +""" Network Data """ + + +from farms_core.array.array_cy cimport (DoubleArray1D, DoubleArray2D, IntegerArray1D) + +include 'types.pxd' + + +cdef class NetworkDataCy: + cdef: + public DoubleArray1D times + public NetworkStatesCy states + public NetworkStatesCy derivatives + public DoubleArray1D external_inputs + public DoubleArray1D outputs + public DoubleArray1D tmp_outputs + public NetworkConnectivityCy connectivity + public NetworkNoiseCy noise + + +cdef class NetworkLogCy: + cdef: + public DoubleArray1D times + public NetworkLogStatesCy states + public DoubleArray2D external_inputs + public DoubleArray2D outputs + public NetworkConnectivityCy connectivity + public NetworkNoiseCy noise + + +cdef class NetworkStatesCy(DoubleArray1D): + """ State array """ + cdef: + public UITYPEv1 indices + + +cdef class NetworkLogStatesCy(DoubleArray2D): + """ State array for logging """ + cdef: + public UITYPEv1 indices + + +cdef class NetworkConnectivityCy: + """ Network connectivity array """ + cdef: + public DTYPEv1 weights + public UITYPEv1 node_indices + public UITYPEv1 edge_indices + public UITYPEv1 index_offsets + + +cdef class NetworkNoiseCy: + """ Noise data array """ + cdef: + public DTYPEv1 states + public UITYPEv1 indices + public DTYPEv1 drift + public DTYPEv1 diffusion + public DTYPEv1 outputs + + +# Network data will hold the necessary computational data +cdef class NetworkData: + + cdef: + # Time + public DoubleArray1D times + + # States + public DoubleArray1D curr_states + DoubleArray1D tmp_states + public UITYPEv1 state_indices + + # Derivatives + public DoubleArray1D curr_derivatives + DoubleArray1D tmp_derivatives + + # Outputs + public DoubleArray1D curr_outputs + DoubleArray1D tmp_outputs + + # External inputs + public DoubleArray1D external_inputs + + # Network connectivity + public NetworkConnectivityCy connectivity + + # Noise + public NetworkNoiseCy noise diff --git a/farms_network/core/data_cy.pyx b/farms_network/core/data_cy.pyx new file mode 100644 index 0000000..4b6b88c --- /dev/null +++ b/farms_network/core/data_cy.pyx @@ -0,0 +1,94 @@ +""" Core Data """ + +cimport numpy as cnp + +import numpy as np + + +################################## +########## Network data ########## +################################## +cdef class NetworkDataCy: + """ Network data """ + + def __init__(self): + """ network data initialization """ + + super().__init__() + + +cdef class NetworkLogCy: + """ Network Log """ + + def __init__(self): + """ Network Logs initialization """ + + super().__init__() + + +cdef class NetworkStatesCy(DoubleArray1D): + """ State array """ + + def __init__( + self, + array: NDArray[(Any,), np.double], + indices: NDArray[(Any,), np.uintc], + ): + super().__init__(array) + assert self.array.is_c_contig() + self.indices = np.array(indices, dtype=np.uintc) + assert self.indices.is_c_contig() + + +cdef class NetworkLogStatesCy(DoubleArray2D): + """ 
State array """ + + def __init__( + self, + array: NDArray[(Any, Any), np.double], + indices: NDArray[(Any,), np.uintc], + ): + super().__init__(array) + assert self.array.is_c_contig() + self.indices = np.array(indices, dtype=np.uintc) + assert self.indices.is_c_contig() + + +cdef class NetworkConnectivityCy: + """ Connectivity array """ + + def __init__( + self, + node_indices: NDArray[(Any,), np.uintc], + edge_indices: NDArray[(Any,), np.uintc], + weights: NDArray[(Any,), np.double], + index_offsets: NDArray[(Any,), np.uintc], + ): + super().__init__() + self.node_indices = np.array(node_indices, dtype=np.uintc) + assert self.node_indices.is_c_contig() + self.edge_indices = np.array(edge_indices, dtype=np.uintc) + assert self.edge_indices.is_c_contig() + self.weights = np.array(weights, dtype=np.double) + assert self.weights.is_c_contig() + self.index_offsets = np.array(index_offsets, dtype=np.uintc) + assert self.index_offsets.is_c_contig() + + +cdef class NetworkNoiseCy: + """ Noise data """ + + def __init__( + self, + states: NDArray[(Any,), np.double], + indices: NDArray[(Any,), np.uintc], + drift: NDArray[(Any,), np.double], + diffusion: NDArray[(Any,), np.double], + outputs: NDArray[(Any,), np.double], + ): + super().__init__() + self.states = np.array(states, dtype=np.double) + self.indices = np.array(indices, dtype=np.uintc) + self.drift = np.array(drift, dtype=np.double) + self.diffusion = np.array(diffusion, dtype=np.double) + self.outputs = np.array(outputs, dtype=np.double) diff --git a/farms_network/core/edge.py b/farms_network/core/edge.py new file mode 100644 index 0000000..d5a3a38 --- /dev/null +++ b/farms_network/core/edge.py @@ -0,0 +1,42 @@ +""" Edge """ + +from farms_network.core.edge_cy import EdgeCy +from farms_network.core.options import EdgeOptions +from farms_network.models import EdgeTypes +from typing import Dict, Type + + +class Edge: + """ Interface to edge class """ + + CY_EDGE_CLASS: Type[EdgeCy] = None + + def __init__(self, source: str, target: str, edge_type: EdgeTypes, model, **kwargs): + self.model = model + self.source: str = source + self.target: str = target + self._edge_cy = self._create_cy_edge(edge_type, **kwargs) + + def _create_cy_edge(self, edge_type, **kwargs) -> EdgeCy: + if self.CY_EDGE_CLASS is None: + return EdgeCy(edge_type, **kwargs) + return self.CY_EDGE_CLASS(edge_type, **kwargs) + + @property + def edge_type(self): + return self._edge_cy.type + + @edge_type.setter + def edge_type(self, edge_type: EdgeTypes): + self._edge_cy.type = edge_type + + @classmethod + def from_options(cls, edge_options: EdgeOptions): + """ From edge options """ + model = edge_options.model + source: str = edge_options.source + target: str = edge_options.target + edge_type: EdgeTypes = edge_options.type + # Need to generate parameters based on the model specified + parameter_options: Dict = {} if edge_options.parameters is None else edge_options.parameters + return cls(source, target, edge_type, model, **parameter_options) diff --git a/farms_network/core/edge_cy.pxd b/farms_network/core/edge_cy.pxd new file mode 100644 index 0000000..70e1a74 --- /dev/null +++ b/farms_network/core/edge_cy.pxd @@ -0,0 +1,25 @@ +""" Edge Base Struture """ + + +cdef enum: + + #EDGE TYPES + GENERIC = 0 + EXCITATORY = 1 + INHIBITORY = 2 + CHOLINERGIC = 3 + PHASE_COUPLING = 4 + + +cdef struct edge_t: + unsigned int type # Type of connection + # Edge parameters + unsigned int nparams + void* params + + +cdef class EdgeCy: + """ Python interface to Edge C-Structure """ + + cdef: + 
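A hedged sketch of the Python Edge interface above wired to its Cython EdgeCy backend. It assumes the compiled edge_cy extension is importable and that EdgeTypes exposes an EXCITATORY member matching the cpdef enum; the node names are made up.

from farms_network.core.edge import Edge
from farms_network.core.options import EdgeOptions
from farms_network.models import EdgeTypes

opts = EdgeOptions(source="drive", target="flexor", weight=1.0, type=EdgeTypes.EXCITATORY)
edge = Edge.from_options(opts)
print(edge.source, edge.target, edge.edge_type)   # edge_type reads the C-level enum value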
edge_t* _edge diff --git a/farms_network/core/edge_cy.pyx b/farms_network/core/edge_cy.pyx new file mode 100644 index 0000000..8d9bf33 --- /dev/null +++ b/farms_network/core/edge_cy.pyx @@ -0,0 +1,42 @@ +""" Edge """ + +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc +from libc.string cimport strdup + + +cpdef enum types: + + #EDGE TYPES + generic = GENERIC + excitatory = EXCITATORY + inhibitory = INHIBITORY + cholinergic = CHOLINERGIC + phase_coupling = PHASE_COUPLING + + +cdef class EdgeCy: + """ Python interface to Edge C-Structure""" + + def __cinit__(self, edge_type: str, **kwargs): + self._edge = malloc(sizeof(edge_t)) + if self._edge is NULL: + raise MemoryError("Failed to allocate memory for edge_t") + self._edge.type = types[edge_type] + self._edge.params = NULL + self._edge.nparams = 0 + + def __dealloc__(self): + if self._edge is not NULL: + free(self._edge) + + def __init__(self, edge_type: str, **kwargs): + ... + + @property + def type(self): + return self._edge.type + + @property + def nparams(self): + return self._edge.nparams diff --git a/farms_network/core/network.py b/farms_network/core/network.py new file mode 100644 index 0000000..1c34904 --- /dev/null +++ b/farms_network/core/network.py @@ -0,0 +1,168 @@ +""" Network """ + +from typing import List, Optional + +import numpy as np +from farms_core import pylog +from farms_network.numeric.integrators_cy import RK4Solver + +from ..models.factory import EdgeFactory, NodeFactory +from ..noise.ornstein_uhlenbeck import OrnsteinUhlenbeck +from .data import NetworkData, NetworkLog +from .edge import Edge +from .network_cy import NetworkCy +from .node import Node +from .options import (EdgeOptions, IntegrationOptions, NetworkOptions, + NodeOptions) + + +class Network: + """ Network class using composition with NetworkCy """ + + def __init__(self, network_options: NetworkOptions): + """ Initialize network with composition approach """ + self.options = network_options + + # Sort nodes based on node-type + self.options.nodes = sorted( + self.options.nodes, key=lambda node: node["model"] + ) + + # Core network data and Cython implementation + self.data = NetworkData.from_options(self.options) + self.log = NetworkLog.from_options(self.options) + + self._network_cy = NetworkCy( + nnodes=len(self.options.nodes), + nedges=len(self.options.edges), + data=self.data, + log=self.log + ) + + # Python-level collections + self.nodes: List[Node] = [] + self.edges: List[Edge] = [] + + # Setup the network + self._setup_network() + + # Internal default solver + self.solver: RK4Solver = None + + # Iteration + if self.options.integration: + self.timestep: float = self.options.integration.timestep + self.iteration: int = 0 + self.n_iterations: int = self.options.integration.n_iterations + else: + raise ValueError("Integration options missing!") + + def step(self, time): + """ Step the network integration """ + self.solver.step( + self._network_cy, + time, + self.data.states.array + ) + # Update noise + self._network_cy.update_noise(time, self.timestep) + + # Update logs + def update_logs(self, time): + if self.options.logs.enable: + self._network_cy.update_logs(time) + + def run(self, n_iterations: Optional[int] = None): + """ Run the network for n_iterations """ + if n_iterations is None: + n_iterations = self.n_iterations + + for iteration in range(n_iterations): + self.step(iteration*self.timestep) + self.update_logs(iteration*self.timestep) + + def _setup_network(self): + """ Setup network nodes and edges """ + 
pylog.info(f"Number of nodes in network: {len(self.options.nodes)}") + pylog.info(f"Number of edges in network: {len(self.options.edges)}") + # Create Python nodes + nstates = 0 + for index, node_options in enumerate(self.options.nodes): + python_node = self._generate_node(node_options) + python_node._node_cy.ninputs = len( + self.data.connectivity.node_indices[ + self.data.connectivity.index_offsets[index]:self.data.connectivity.index_offsets[index+1] + ] + ) if self.data.connectivity.index_offsets else 0 + nstates += python_node.nstates + self.nodes.append(python_node) + + # Create Python edges + for edge_options in self.options.edges: + python_edge = self._generate_edge(edge_options) + self.edges.append(python_edge) + + self._network_cy.nstates = nstates + + # Noise + self.sde_noise = OrnsteinUhlenbeck(self.options) + + # Pass Python nodes/edges to Cython layer for C struct setup + self._network_cy.setup_network( + self.data, self.nodes, self.edges, self.sde_noise._ou_cy + ) + + # Initialize states + self._initialize_states() + + def setup_integrator(self): + """ Setup numerical integrators """ + self.solver = RK4Solver(self._network_cy.nstates, self.options.integration.timestep) + + def _initialize_states(self): + """ Initialize node states from options """ + for j, node_opts in enumerate(self.options.nodes): + if node_opts.state: + for state_index, index in enumerate( + range(self.data.states.indices[j], self.data.states.indices[j+1]) + ): + self.data.states.array[index] = node_opts.state.initial[state_index] + + @staticmethod + def _generate_node(node_options: NodeOptions) -> Node: + """ Generate a node from options """ + NodeClass = NodeFactory.create(node_options.model) + return NodeClass.from_options(node_options) + + @staticmethod + def _generate_edge(edge_options: EdgeOptions) -> Edge: + """ Generate an edge from options """ + EdgeClass = EdgeFactory.create(edge_options.model) + return EdgeClass.from_options(edge_options) + + def get_ode_func(self): + """ Get ODE function for external integration """ + return self._network_cy.ode_func + + # Delegate properties to Cython implementation + @property + def nnodes(self) -> int: + return self._network_cy.nnodes + + @property + def nedges(self) -> int: + return self._network_cy.nedges + + @property + def nstates(self) -> int: + return self._network_cy.nstates + + # Factory methods + @classmethod + def from_options(cls, options: NetworkOptions): + """ Initialize network from NetworkOptions """ + return cls(options) + + def to_options(self) -> NetworkOptions: + """ Return NetworkOptions from network """ + return self.options diff --git a/farms_network/core/network_cy.pxd b/farms_network/core/network_cy.pxd new file mode 100644 index 0000000..13639a8 --- /dev/null +++ b/farms_network/core/network_cy.pxd @@ -0,0 +1,80 @@ +cimport numpy as cnp + +from ..numeric.integrators_cy cimport EulerMaruyamaSolver, RK4Solver +from ..numeric.system_cy cimport ODESystem, SDESystem +from ..noise.ornstein_uhlenbeck_cy cimport OrnsteinUhlenbeckCy +from .data_cy cimport NetworkDataCy, NetworkLogCy +from .edge_cy cimport EdgeCy, edge_t +from .node_cy cimport NodeCy, node_t, node_inputs_t + + +cdef struct noise_t: + # States + int nstates + double* states + double* drift + double* diffusion + const unsigned int* indices + # Outputs + double* outputs + + +cdef struct network_t: + # info + unsigned int nnodes + unsigned int nedges + unsigned int nstates + + # nodes list + node_t** nodes + # edges list + edge_t** edges + + # ODE + double* states + unsigned int* 
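An end-to-end usage sketch for the Network class, assuming the package builds and that a NetworkOptions instance with nodes, edges, integration and logs sections is available (its construction is not shown in this patch).

from farms_network.core.network import Network
from farms_network.core.options import NetworkOptions

opts: NetworkOptions = ...      # assumed: nodes, edges, integration and logs already populated
network = Network.from_options(opts)
network.setup_integrator()      # creates the internal RK4 solver; required before step()/run()
network.run()                   # integrates n_iterations steps and fills the log buffers
network.log.to_file("network_log.h5")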
states_indices + + double* derivatives + unsigned int* derivatives_indices + + double* outputs + double* tmp_outputs + + double* external_inputs + + unsigned int* node_indices + unsigned int* edge_indices + double* weights + unsigned int* index_offsets + + # Noise + noise_t noise + + +cdef class NetworkCy(ODESystem): + """ Python interface to Network ODE """ + + cdef: + network_t *_network + public list nodes + public list edges + NetworkDataCy data + NetworkLogCy log + + public unsigned int iteration + const unsigned int n_iterations + const unsigned int buffer_size + const double timestep + + OrnsteinUhlenbeckCy sde_noise + + cdef void evaluate(self, double time, double[:] states, double[:] derivatives) noexcept + cdef void c_update_noise(self, double time, double timestep) noexcept + # cpdef void update_iteration(self) + + +cdef class NetworkNoiseCy(SDESystem): + """ Interface to stochastic noise in the network """ + + cdef void evaluate_a(self, double time, double[:] states, double[:] drift) noexcept + cdef void evaluate_b(self, double time, double[:] states, double[:] diffusion) noexcept diff --git a/farms_network/core/network_cy.pyx b/farms_network/core/network_cy.pyx new file mode 100644 index 0000000..68b8442 --- /dev/null +++ b/farms_network/core/network_cy.pyx @@ -0,0 +1,316 @@ +""" Network """ + +include "types.pxd" + +import numpy as np + +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc +from libc.string cimport strdup + +from ..models.factory import NodeFactory + +from libc.math cimport sqrt as csqrt + +from ..noise.ornstein_uhlenbeck_cy cimport OrnsteinUhlenbeckCy + +from .data import NetworkData, NetworkStates + +from .data_cy cimport (NetworkConnectivityCy, NetworkDataCy, NetworkNoiseCy, + NetworkStatesCy) +from .node_cy cimport processed_inputs_t + +from typing import List + +from .options import NetworkOptions + + +cdef inline void ode( + double time, + double[:] states, + double[:] derivatives, + network_t* c_network, +) noexcept: + """ C Implementation to compute full network state """ + + cdef node_t* __node + cdef node_t** c_nodes = c_network.nodes + cdef edge_t** c_edges = c_network.edges + cdef unsigned int nnodes = c_network.nnodes + cdef unsigned int j + cdef processed_inputs_t processed_inputs = { + 'generic': 0.0, + 'excitatory': 0.0, + 'inhibitory': 0.0, + 'cholinergic': 0.0, + 'phase_coupling': 0.0 + } + cdef node_inputs_t node_inputs + + node_inputs.network_outputs = c_network.outputs + # It is important to use the states passed to the function and not from the data.states + cdef double* states_ptr = &states[0] + cdef double* derivatives_ptr = &derivatives[0] + + # Noise + cdef double* noise = c_network.noise.outputs + + for j in range(nnodes): + __node = c_nodes[j] + + # Prepare node context + node_inputs.node_indices = c_network.node_indices + c_network.index_offsets[j] + node_inputs.edge_indices = c_network.edge_indices + c_network.index_offsets[j] + node_inputs.weights = c_network.weights + c_network.index_offsets[j] + node_inputs.external_input = c_network.external_inputs[j] + + node_inputs.ninputs = __node.ninputs + node_inputs.node_index = j + + # Compute the inputs from all nodes + processed_inputs.generic = 0.0 + processed_inputs.excitatory = 0.0 + processed_inputs.inhibitory = 0.0 + processed_inputs.cholinergic = 0.0 + processed_inputs.phase_coupling = 0.0 + + __node.input_tf( + time, + states_ptr + c_network.states_indices[j], + node_inputs, + c_nodes[j], + c_edges, + &processed_inputs + ) + + if __node.is_statefull: + # 
Compute the ode + __node.ode( + time, + states_ptr + c_network.states_indices[j], + derivatives_ptr + c_network.states_indices[j], + processed_inputs, + noise[j], + c_nodes[j] + ) + # Check for writing to proper outputs array + c_network.tmp_outputs[j] = __node.output_tf( + time, + states_ptr + c_network.states_indices[j], + processed_inputs, + noise[j], + c_nodes[j], + ) + + +cdef inline void _noise_states_to_output( + double[:] states, + unsigned int[:] indices, + double[:] outputs, +) noexcept: + """ Copy noise states data to noise outputs """ + cdef int n_indices = indices.shape[0] + cdef int index + for index in range(n_indices): + outputs[indices[index]] = states[index] + + +cdef class NetworkCy(ODESystem): + """ Python interface to Network ODE """ + + def __cinit__(self, nnodes: int, nedges: int, data: NetworkDataCy, log: NetworkLogCy): + # Memory allocation only + self._network = malloc(sizeof(network_t)) + if self._network is NULL: + raise MemoryError("Failed to allocate memory for Network") + + self._network.nnodes = nnodes + self._network.nedges = nedges + + # Allocate C arrays + self._network.nodes = malloc(self.nnodes * sizeof(node_t*)) + if self._network.nodes is NULL: + raise MemoryError("Failed to allocate memory for Network nodes") + self._network.edges = malloc(self.nedges * sizeof(edge_t*)) + if self._network.edges is NULL: + raise MemoryError("Failed to allocate memory for Network edges") + + # Initialize network context + self.data = data + if self.data.states.array.size > 0: + self._network.states = &self.data.states.array[0] + else: + self._network.states = NULL # No stateful + + if self.data.states.indices.size > 0: + self._network.states_indices = &self.data.states.indices[0] + else: + assert self._network.states == NULL + self._network.states_indices = NULL + + # if self.data.derivatives.array.size > 0: + # self._network.derivatives = &self.data.derivatives.array[0] + # else: + # assert self._network.states == NULL + self._network.derivatives = NULL + + if self.data.external_inputs.array.size > 0: + self._network.external_inputs = &self.data.external_inputs.array[0] + else: + raise ValueError("External inputs array cannot be of size 0") + + if self.data.outputs.array.size > 0: + self._network.outputs = &self.data.outputs.array[0] + else: + raise ValueError("Outputs array cannot be of size 0") + + if self.data.tmp_outputs.array.size > 0: + self._network.tmp_outputs = &self.data.tmp_outputs.array[0] + else: + raise ValueError("Temp Outputs array cannot be of size 0") + + if self.data.connectivity.node_indices.size > 0: + self._network.node_indices = &self.data.connectivity.node_indices[0] + else: + raise ValueError("Connectivity array cannot be of size 0") + + if self.data.connectivity.edge_indices.size > 0: + self._network.edge_indices = &self.data.connectivity.edge_indices[0] + else: + raise ValueError("Connectivity array cannot be of size 0") + + if self.data.connectivity.weights.size > 0: + self._network.weights = &self.data.connectivity.weights[0] + else: + raise ValueError("Connectivity array cannot be of size 0") + + if self.data.connectivity.index_offsets.size > 0: + self._network.index_offsets = &self.data.connectivity.index_offsets[0] + else: + raise ValueError("Connectivity array cannot be of size 0") + + # Noise + if self.data.noise.states.size > 0: + self._network.noise.states = &self.data.noise.states[0] + else: + self._network.noise.states = NULL + + if self.data.noise.drift.size > 0: + self._network.noise.drift = &self.data.noise.drift[0] + else: + 
self._network.noise.drift = NULL + + if self.data.noise.diffusion.size > 0: + self._network.noise.diffusion = &self.data.noise.diffusion[0] + else: + self._network.noise.diffusion = NULL + + if self.data.noise.indices.size > 0: + self._network.noise.indices = &self.data.noise.indices[0] + else: + self._network.noise.indices = NULL + + if self.data.noise.outputs.size > 0: + self._network.noise.outputs = &self.data.noise.outputs[0] + else: + self._network.noise.outputs = NULL + + + def __init__(self, nnodes, nedges, data: NetworkDataCy, log: NetworkLogCy): + """ Initialize """ + super().__init__() + self.log = log + self.iteration = 0 + + def __dealloc__(self): + """ Deallocate any manual memory as part of clean up """ + if self._network.nodes is not NULL: + free(self._network.nodes) + self._network.nodes = NULL + if self._network.edges is not NULL: + free(self._network.edges) + self._network.edges = NULL + if self._network is not NULL: + free(self._network) + self._network = NULL + + def setup_network( + self, + data: NetworkData, + nodes: List[NodeCy], + edges: List[EdgeCy], + sde_noise: SDESystem=None, + ): + """ Setup network """ + + for index, node in enumerate(nodes): + self._network.nodes[index] = ((node._node_cy)._node) + + for index, edge in enumerate(edges): + self._network.edges[index] = ((edge._edge_cy)._edge) + + self.sde_noise = sde_noise + + cdef void evaluate(self, double time, double[:] states, double[:] derivatives) noexcept: + """ Evaluate the ODE """ + # Update network ODE + ode(time, states, derivatives, self._network) + # Swap the temporary outputs + self.data.outputs.array[:] = self.data.tmp_outputs.array[:] + + cdef void c_update_noise(self, double time, double timestep) noexcept: + """ Update """ + if self.sde_noise is not None: + self.sde_noise.evaluate_a(time, self.data.noise.states, self.data.noise.drift) + self.sde_noise.evaluate_b(time, self.data.noise.states, self.data.noise.diffusion) + for j in range(self.sde_noise.n_dim): + self.data.noise.states[j] += ( + self.data.noise.drift[j]*timestep + csqrt(timestep)*self.data.noise.diffusion[j] + ) + self.data.noise.outputs[self.data.noise.indices[j]] = self.data.noise.states[j] + + def update_noise(self, double time, double timestep): + self.c_update_noise(time, timestep) + + def ode_func(self, double time, double[:] states): + """ Evaluate the ODE """ + self.evaluate(time, states, self.data.derivatives.array) + return self.data.derivatives.array + + def update_logs(self, time: float): + """ Updated logs to copy current iteration data into logs """ + self.iteration += 1 + self.log.times.array[self.iteration] = time + self.log.states.array[self.iteration, :] = self.data.states.array[:] + self.log.external_inputs.array[self.iteration, :] = self.data.external_inputs.array[:] + self.log.outputs.array[self.iteration, :] = self.data.outputs.array[:] + + @property + def nnodes(self): + """ Number of nodes in the network """ + return self._network.nnodes + + @property + def nedges(self): + """ Number of edges in the network """ + return self._network.nedges + + @property + def nstates(self): + """ Number of states in the network """ + return self._network.nstates + + @nstates.setter + def nstates(self, value: int): + """ Number of network states """ + self._network.nstates = value + + @property + def noise_nstates(self): + """ Number of noise states in the network """ + return self._network.noise.nstates + + @noise_nstates.setter + def noise_nstates(self, value: int): + """ Number of network noise states """ + 
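A NumPy-only sketch of the stochastic update performed in c_update_noise above: an Euler-Maruyama step x += drift*dt + sqrt(dt)*diffusion, written here for an Ornstein-Uhlenbeck process with drift (mu - x)/tau and diffusion sigma*xi. Whether the random increment is folded into evaluate_b in the actual OrnsteinUhlenbeckCy implementation is an assumption here, and the parameter values are illustrative.

import numpy as np

rng = np.random.default_rng(0)
mu, sigma, tau, dt = 0.0, 0.005, 10.0, 1e-3
x = np.zeros(4)                       # one noise state per stochastic node

for _ in range(1000):
    drift = (mu - x) / tau                          # deterministic pull towards mu
    diffusion = sigma * rng.standard_normal(x.shape)  # random increment scaled by sigma
    x += drift * dt + np.sqrt(dt) * diffusion       # same update rule as c_update_noise

print(x)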
self._network.noise.nstates = value diff --git a/farms_network/core/node.py b/farms_network/core/node.py new file mode 100644 index 0000000..cc71a04 --- /dev/null +++ b/farms_network/core/node.py @@ -0,0 +1,90 @@ +""" Node """ + + +from abc import ABC, abstractmethod +from typing import Any, Dict, Type + +from farms_network.core.node_cy import NodeCy +from farms_network.core.options import NodeOptions + + +class Node(ABC): + + CY_NODE_CLASS: Type[NodeCy] = None + + def __init__(self, name: str, model: str, **parameters): + self.name: str = name # Unique name of the node + self.model: str = model # Type of the model (e.g., "empty") + self._node_cy = self._create_cy_node(**parameters) + + def _create_cy_node(self, **kwargs) -> NodeCy: + if self.CY_NODE_CLASS is None: + raise NotImplementedError("Must define CY_NODE_CLASS") + return self.CY_NODE_CLASS(**kwargs) + + # General node properties + @property + def nstates(self): + return self._node_cy.nstates + + @property + def nparams(self): + return self._node_cy.nparams + + @property + def ninputs(self): + return self._node_cy.ninputs + + @property + def is_statefull(self): + return self._node_cy.is_statefull + + def print_parameters(self): + return self._node_cy.parameters + + def input_tf(self): + """ Input transfer function """ + pass + + def ode(self, time, states, derivatives, external_input, network_outputs, inputs, weights, noise): + """ ODE computation """ + return self._node_cy.ode( + time, states, derivatives, external_input, + network_outputs, inputs, weights, noise + ) + + def output_tf(self, time, states, input_val, noise): + """ ODE computation """ + return self._node_cy.output_tf(time, states, input_val, noise) + + @classmethod + def from_options(cls, node_options: NodeOptions): + """ From node options """ + name: str = node_options.name + parameters = node_options.parameters + if parameters is None: + parameters = {} + return cls(name, **parameters) + + def to_options(self): + """ To node options """ + name: str = node_options.name + parameters = node_options.parameters + return cls(name, **parameters) + + def debug_info(self): + """ Get debug information about the node """ + return { + 'class': self.__class__.__name__, + 'model': self.model, + 'name': self.name, + 'nstates': self.nstates, + 'ninputs': self.ninputs, + 'nparams': self.nparams, + 'is_statefull': self.is_statefull, + 'initialized': self._initialized, + 'has_ode_func': self._node.ode_func is not NULL, + 'has_output_func': self._node.output_func is not NULL, + 'has_params': self._node.params is not NULL, + 'parameters': self.parameters + } diff --git a/farms_network/core/node_cy.pxd b/farms_network/core/node_cy.pxd new file mode 100644 index 0000000..da37a95 --- /dev/null +++ b/farms_network/core/node_cy.pxd @@ -0,0 +1,100 @@ +""" Node Base Struture. 
""" + +from farms_network.core.edge_cy cimport edge_t + + +cdef struct node_inputs_t: + double* network_outputs # Network level outputs + double* weights # Network connection weights + unsigned int* node_indices # Which nodes provide input + unsigned int* edge_indices # Which edges provide input + double external_input # external input + int ninputs # Number of inputs + unsigned int node_index # This node's index (for self-reference) + + +cdef struct processed_inputs_t: + double generic + double excitatory + double inhibitory + double cholinergic + double phase_coupling + + +# Input transfer function +# Receives n-inputs and produces one output to be fed into ode/output_tf +cdef void base_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) + + +# ODE to compute the neural dynamics based on current state and inputs +cdef void base_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) + + +# Output transfer function based on current state +cdef double base_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) + + +cdef struct node_t: + # Generic parameters + unsigned int nstates # Number of state variables in the node. + unsigned int ninputs # Number of inputs to the node within the network + unsigned int nparams # Number of parameters in the node + + char* model # Type of the model (e.g., "empty"). + char* name # Unique name of the node. + + bint is_statefull # Flag indicating whether the node is stateful. (ODE) + + # Parameters + void* params # Pointer to the parameters of the node. + + # Functions + void input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out + ) noexcept + void ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, + ) noexcept + double output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, + ) noexcept + + +cdef class NodeCy: + """ Interface to Node C-Structure """ + cdef: + node_t* _node diff --git a/farms_network/core/node_cy.pyx b/farms_network/core/node_cy.pyx new file mode 100644 index 0000000..a74323a --- /dev/null +++ b/farms_network/core/node_cy.pyx @@ -0,0 +1,101 @@ +""" Node """ + +from typing import Optional + +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc +from libc.string cimport strdup + +from farms_network.core.options import NodeOptions +from farms_network.models import Models + + +# Input transfer function +# Receives n-inputs and produces one output to be fed into ode/output_tf +cdef void base_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + raise NotImplementedError("input_tf must be implemented by node type") + +# ODE to compute the neural dynamics based on current state and inputs +cdef void base_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + raise NotImplementedError("ode must be implemented by node type") + + +# Output transfer function based on current state +cdef double base_output_tf( + double time, + const 
double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + raise NotImplementedError("output_tf must be implemented by node type") + + +cdef class NodeCy: + """ Interface to Node C-Structure """ + + MODEL = Models.BASE.value + + def __cinit__(self, **kwargs): + self._node = malloc(sizeof(node_t)) + if self._node is NULL: + raise MemoryError("Failed to allocate memory for node_t") + self._node.nstates = 0 + self._node.input_tf = base_input_tf + self._node.ode = base_ode + self._node.output_tf = base_output_tf + + # Setup parameters + self._node.params = NULL + + def __init__(self, **kwargs): + ... + + def __dealloc__(self): + if self._node is not NULL: + free(self._node) + + # Property methods + @property + def nstates(self): + return self._node.nstates + + @property + def ninputs(self): + return self._node.ninputs + + @ninputs.setter + def ninputs(self, value: int): + self._node.ninputs = value + + @property + def nparams(self): + return self._node.nparams + + @property + def is_statefull(self): + return self._node.is_statefull + + def ode(self, time, double[:] states, double[:] derivatives, input_val, noise): + cdef double* states_ptr = &states[0] + cdef double* derivatives_ptr = &derivatives[0] + self._node.ode(time, states_ptr, derivatives_ptr, input_val, noise, self._node) + + def output_tf(self, time, double[:] states, input_val, noise): + """ Call C node output """ + cdef double* states_ptr = &states[0] + return self._node.output_tf(time, states_ptr, input_val, noise, self._node) diff --git a/farms_network/core/options.py b/farms_network/core/options.py new file mode 100644 index 0000000..5f978fd --- /dev/null +++ b/farms_network/core/options.py @@ -0,0 +1,1241 @@ +""" Options to configure the neural and network models """ + + +import time +from typing import Any, Dict, Iterable, List, Self, Type, Union + +from farms_core import pylog +from farms_core.options import Options +from farms_network.models import EdgeTypes, Models + + +########################### +# Node Base Class Options # +########################### +class NodeOptions(Options): + """ Base class for defining node options """ + MODEL = Models.BASE + + def __init__( + self, + name: str, + model: Union[str, Models] = MODEL, + parameters: "NodeParameterOptions" = None, + visual: "NodeVisualOptions" = None, + state: "NodeStateOptions" = None, + noise: "NoiseOptions" = None, + ): + """ Initialize """ + super().__init__() + self.name = name + if isinstance(model, Models): + model = Models.to_str(model) + elif not isinstance(model, str): + raise TypeError( + f"{model} is of {type(model)}. 
Needs to {type(Models)} or {type(str)}" + ) + self.model: str = model + self.parameters = parameters + self.visual = visual + self.state = state + self.noise = noise + + @classmethod + def from_options(cls, options: Dict): + """ Load from options """ + visual = options.get("visual") + return cls( + name=options["name"], + model=options.get("model", cls.MODEL), + parameters=options.get("parameters"), + state=options.get("state"), + noise=options.get("noise"), + visual=NodeVisualOptions.from_options(visual) if visual else None, + ) + + def __eq__(self, other): + if isinstance(other, NodeOptions): + return self.name == other.name + elif isinstance(other, str): + return self.name == other + return False + + def __hash__(self): + return hash(self.name) # Hash based on the node name (or any unique property) + + def __str__(self) -> str: + attrs_str = ',\n'.join(f'{attr}={getattr(self, attr)}' for attr in self.keys()) + return f"{self.__class__.__name__}(\n{attrs_str}\n)" + + def __repr__(self) -> str: + return self.__str__() + + +class NodeParameterOptions(Options): + """ Base class for node specific parameters """ + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + return cls(**kwargs) + + +class NodeStateOptions(Options): + """Base class for node-specific state options.""" + + STATE_NAMES: List[str] = [] # Override in subclasses + + def __init__(self, initial: List[float]): + super().__init__() + self.initial = list(initial) + + if len(self.initial) != len(self.STATE_NAMES): + raise ValueError( + f"Length mismatch: expected {len(self.STATE_NAMES)} values for {self.STATE_NAMES}, got {len(self.initial)}" + ) + + @classmethod + def from_options(cls, options: Dict) -> "NodeStateOptions": + """Create from a dict of options.""" + initial = options.get("initial") + if initial is None: + raise ValueError("Missing required 'initial' values in options") + return cls(initial=initial) + + def __repr__(self): + pairs = ", ".join(f"{n}={v}" for n, v in zip(self.STATE_NAMES, self.initial)) + return f"{self.__class__.__name__}({pairs})" + + +class NodeLogOptions(Options): + """ Log options for the node level """ + + def __init__(self, buffer_size: int, enable: bool, **kwargs): + super().__init__(**kwargs) + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + return cls(**kwargs) + + +class NodeVisualOptions(Options): + """ Base class for node visualization parameters """ + + def __init__(self, **kwargs): + super().__init__() + self.position: List[float] = kwargs.pop("position", [0.0, 0.0, 0.0]) + self.radius: float = kwargs.pop("radius", 1.0) + self.color: List[float] = kwargs.pop("color", [1.0, 0.0, 0.0]) + self.label: str = kwargs.pop("label", "n") + self.layer: str = kwargs.pop("layer", "background") + self.latex: dict = kwargs.pop("latex", "{}") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + return cls(**kwargs) + + +################ +# Edge Options # +################ +class EdgeOptions(Options): + """ Base class for defining edge options between nodes """ + + MODEL = Models.BASE + + def __init__(self, **kwargs): + """ Initialize """ + super().__init__() + self.source: str = kwargs.pop("source") + self.target: str = kwargs.pop("target") + self.weight: float = kwargs.pop("weight") + self.type = EdgeTypes.to_str(kwargs.pop("type")) + model = kwargs.pop("model", Models.BASE) + if isinstance(model, 
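A short sketch of the NodeStateOptions contract above: subclasses declare STATE_NAMES and the length of initial is validated against it. The TwoStateOptions class is hypothetical and only illustrates the mechanism.

from typing import List

from farms_network.core.options import NodeStateOptions


class TwoStateOptions(NodeStateOptions):
    """Hypothetical two-state node, for illustration only."""
    STATE_NAMES: List[str] = ["v", "h"]


states = TwoStateOptions(initial=[-60.0, 0.1])
print(states)                      # TwoStateOptions(v=-60.0, h=0.1)
# TwoStateOptions(initial=[-60.0]) would raise ValueError (length mismatch)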
Models): + model = Models.to_str(model) + elif not isinstance(model, str): + raise TypeError( + f"{model} is of {type(model)}. Needs to {type(Models)} or {type(str)}" + ) + self.model: str = model + self.parameters: EdgeParameterOptions = kwargs.pop("parameters", EdgeParameterOptions()) + + self.visual: EdgeVisualOptions = kwargs.pop("visual", None) + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + def __eq__(self, other): + if isinstance(other, EdgeOptions): + return ( + (self.source == other.source) and + (self.target == other.target) + ) + return False + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + + options = {} + options["source"] = kwargs["source"] + options["target"] = kwargs["target"] + options["weight"] = kwargs["weight"] + options["type"] = kwargs["type"] + options["parameters"] = EdgeParameterOptions.from_options( + kwargs["parameters"] + ) + if visual := kwargs.get("visual"): + options["visual"] = EdgeVisualOptions.from_options(visual) + else: + options["visual"] = None + return cls(**options) + + def __str__(self) -> str: + attrs_str = ',\n'.join(f'{attr}={getattr(self, attr)}' for attr in self.keys()) + return f"{self.__class__.__name__}(\n{attrs_str}\n)" + + def __repr__(self) -> str: + return self.__str__() + + +class EdgeParameterOptions(Options): + """ Base class for edge specific parameters """ + + def __init__(self, **kwargs): + super().__init__() + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + return cls(**kwargs) + + +class EdgeVisualOptions(Options): + """ Base class for edge visualization parameters """ + + def __init__(self, **kwargs): + super().__init__() + self.color: List[float] = kwargs.pop("color", [1.0, 0.0, 0.0]) + self.alpha: float = kwargs.pop("alpha", 1.0) + self.label: str = kwargs.pop("label", "") + self.layer: str = kwargs.pop("layer", "background") + self.latex: dict = kwargs.pop("latex", "{}") + + # New options for FancyArrowPatch compatibility + self.arrowstyle: str = kwargs.pop("arrowstyle", "->") + self.connectionstyle: str = kwargs.pop("connectionstyle", "arc3,rad=0.1") + self.linewidth: float = kwargs.pop("linewidth", 1.5) + self.edgecolor: List[float] = kwargs.pop("edgecolor", [0.0, 0.0, 0.0]) + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + return cls(**kwargs) + + +################# +# Noise Options # +################# +class NoiseOptions(Options): + """ Base class for node noise options """ + + NOISE_TYPES = ("additive",) + NOISE_MODELS = ("white", "ornstein_uhlenbeck") + + def __init__(self, **kwargs): + super().__init__() + self.type = kwargs.pop("type", NoiseOptions.NOISE_TYPES[0]) + assert self.type.lower() in NoiseOptions.NOISE_TYPES + self.model = kwargs.pop("model", None) + assert self.model.lower() in NoiseOptions.NOISE_MODELS + self.is_stochastic = kwargs.pop("is_stochastic") + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + noise_models = { + cls.NOISE_MODELS[0]: NoiseOptions, + cls.NOISE_MODELS[1]: OrnsteinUhlenbeckOptions + } + noise_model = kwargs.pop("model") + return noise_models[noise_model].from_options(kwargs) + + +class OrnsteinUhlenbeckOptions(NoiseOptions): + """ Options to OrnsteinUhlenbeckOptions """ + + def __init__(self, **kwargs): + """ Initialize """ + model = NoiseOptions.NOISE_MODELS[1] + is_stochastic = True + super().__init__(model=model, is_stochastic=is_stochastic) + self.mu: float = kwargs.pop("mu") + 
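A hedged construction sketch for EdgeOptions as defined above. The node names are made up, and EdgeTypes.EXCITATORY is assumed to be a valid member of the EdgeTypes enum (the enum itself is not part of this patch).

from farms_network.core.options import EdgeOptions, EdgeVisualOptions
from farms_network.models import EdgeTypes

edge = EdgeOptions(
    source="drive",                                    # hypothetical node names
    target="flexor",
    weight=1.0,
    type=EdgeTypes.EXCITATORY,                         # stored internally as its string form
    visual=EdgeVisualOptions(label="exc", linewidth=2.0),
)
print(edge.type, edge.weight, edge.model)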
self.sigma: float = kwargs.pop("sigma") + self.tau: float = kwargs.pop("tau") + + @classmethod + def from_options(cls, kwargs: Dict): + """ From options """ + options = {} + options["mu"] = kwargs.pop("mu") + options["sigma"] = kwargs.pop("sigma") + options["tau"] = kwargs.pop("tau") + return cls(**options) + + @classmethod + def defaults(cls, **kwargs: Dict): + """ From options """ + options = {} + options["mu"] = kwargs.pop("mu", 0.0) + options["sigma"] = kwargs.pop("sigma", 0.005) + options["tau"] = kwargs.pop("tau", 10.0) + return cls(**options) + + +####################### +# Relay Model Options # +####################### +class RelayNodeOptions(NodeOptions): + """ Class to define the properties of Relay node model """ + MODEL = Models.RELAY + + def __init__(self, **kwargs): + """ Initialize """ + state = kwargs.pop("state", None) + parameters = kwargs.pop("parameters", None) + + assert state is None + assert parameters is None + super().__init__( + name=kwargs.pop("name"), + model=RelayNodeOptions.MODEL, + parameters=parameters, + visual=kwargs.pop("visual"), + state=state, + noise=kwargs.pop("noise"), + ) + self._nstates = 0 + self._nparameters = 0 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = None + options["visual"] = kwargs.get("visual", None) + options["noise"] = kwargs.pop("noise", None) + return cls(**options) + + +######################## +# Linear Model Options # +######################## +class LinearNodeOptions(NodeOptions): + """ Class to define the properties of Linear node model """ + MODEL = Models.LINEAR + + def __init__(self, **kwargs): + """ Initialize """ + + state = kwargs.pop("state", None) + assert state is None + super().__init__( + name=kwargs.pop("name"), + model=LinearNodeOptions.MODEL, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=state, + noise=kwargs.pop("noise"), + ) + self._nstates = 0 + self._nparameters = 2 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = LinearParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = kwargs.get("visual", None) + options["noise"] = kwargs.pop("noise", None) + return cls(**options) + + +class LinearParameterOptions(NodeParameterOptions): + + def __init__(self, **kwargs): + super().__init__() + self.slope = kwargs.pop("slope") + self.bias = kwargs.pop("bias") + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for Linear Node model """ + + options = {} + options["slope"] = kwargs.pop("slope", 1.0) + options["bias"] = kwargs.pop("bias", 0.0) + return cls(**options) + + +###################### +# ReLU Model Options # +###################### +class ReLUNodeOptions(NodeOptions): + """ Class to define the properties of ReLU node model """ + + MODEL = Models.RELU + + def __init__(self, **kwargs): + """ Initialize """ + + state = kwargs.pop("state", None) + assert state is None + super().__init__( + name=kwargs.pop("name"), + model=ReLUNodeOptions.MODEL, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=state, + noise=kwargs.pop("noise"), + ) + self._nstates = 0 + self._nparameters = 3 + + if kwargs: + 
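A small sketch of the noise options above: OrnsteinUhlenbeckOptions.defaults() fills mu, sigma and tau with the documented defaults and marks the node as stochastic, which is what NetworkNoise.from_options counts when sizing its state arrays.

from farms_network.core.options import OrnsteinUhlenbeckOptions

noise = OrnsteinUhlenbeckOptions.defaults(sigma=0.01)   # override sigma, keep mu and tau defaults
print(noise.model, noise.is_stochastic, noise.mu, noise.sigma, noise.tau)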
raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = ReLUParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = kwargs.get("visual", None) + options["state"] = None + options["noise"] = kwargs.pop("noise", None) + return cls(**options) + + +class ReLUParameterOptions(NodeParameterOptions): + + def __init__(self, **kwargs): + super().__init__() + self.gain = kwargs.pop("gain") + self.sign = kwargs.pop("sign") + self.offset = kwargs.pop("offset") + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for ReLU Node model """ + options = {} + options["gain"] = kwargs.pop("gain", 1.0) + options["sign"] = kwargs.pop("sign", 1) + options["offset"] = kwargs.pop("offset", 0.0) + + return cls(**options) + + +############################################ +# Phase-Amplitude Oscillator Model Options # +############################################ +class OscillatorNodeOptions(NodeOptions): + """ Class to define the properties of Oscillator node model """ + + MODEL = Models.OSCILLATOR + + def __init__(self, **kwargs): + """ Initialize """ + + super().__init__( + name=kwargs.pop("name"), + model=OscillatorNodeOptions.MODEL, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=kwargs.pop("state"), + noise=kwargs.pop("noise"), + ) + self._nstates = 3 + self._nparameters = 3 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = OscillatorNodeParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = NodeVisualOptions.from_options( + kwargs["visual"] + ) + options["state"] = OscillatorStateOptions.from_options( + kwargs["state"] + ) + options["noise"] = None + if kwargs["noise"] is not None: + options["noise"] = NoiseOptions.from_options( + kwargs["noise"] + ) + return cls(**options) + + +class OscillatorNodeParameterOptions(NodeParameterOptions): + + def __init__(self, **kwargs): + super().__init__() + self.intrinsic_frequency = kwargs.pop("intrinsic_frequency") # Hz + self.nominal_amplitude = kwargs.pop("nominal_amplitude") # + self.amplitude_rate = kwargs.pop("amplitude_rate") # + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for Oscillator Node model """ + + options = {} + + options["intrinsic_frequency"] = kwargs.pop("intrinsic_frequency", 1.0) + options["nominal_amplitude"] = kwargs.pop("nominal_amplitude", 1.0) + options["amplitude_rate"] = kwargs.pop("amplitude_rate", 1.0) + + return cls(**options) + + +class OscillatorStateOptions(NodeStateOptions): + """ Oscillator node state options """ + + STATE_NAMES = ["phase", "amplitude_0", "amplitude"] + + def __init__(self, initial): + super().__init__(initial=initial) + + +class OscillatorEdgeOptions(EdgeOptions): + """ Oscillator Edge Options """ + MODEL = Models.OSCILLATOR + + def __init__(self, **kwargs): + parameters = kwargs.pop("parameters") + assert isinstance(parameters, OscillatorEdgeParameterOptions) + super().__init__( + source=kwargs.pop("source"), + target=kwargs.pop("target"), + model=OscillatorEdgeOptions.MODEL, + weight=kwargs.pop("weight"), + type=kwargs.pop("type"), + 
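A hedged sketch assembling a ReLU node from the option classes above; the node name and visual settings are illustrative only.

from farms_network.core.options import (
    NodeVisualOptions, ReLUNodeOptions, ReLUParameterOptions,
)

relu_node = ReLUNodeOptions(
    name="extensor_drive",                               # hypothetical node name
    parameters=ReLUParameterOptions.defaults(gain=2.0, sign=-1),
    visual=NodeVisualOptions(label="E", color=[0.0, 0.0, 1.0]),
    noise=None,
)
print(relu_node.model, relu_node.parameters.gain)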
parameters=parameters, + visual=kwargs.pop("visual"), + ) + self._nparameters = 1 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["source"] = kwargs["source"] + options["target"] = kwargs["target"] + options["weight"] = kwargs["weight"] + options["type"] = kwargs["type"] + options["parameters"] = OscillatorEdgeParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = EdgeVisualOptions.from_options(kwargs["visual"]) + return cls(**options) + + +class OscillatorEdgeParameterOptions(EdgeParameterOptions): + """ Oscillator edge parameter options """ + + def __init__(self, **kwargs): + super().__init__() + self.phase_difference = kwargs.pop("phase_difference") # radians + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for Oscillator Node model """ + + options = {} + options["phase_difference"] = kwargs.pop("phase_difference", 0.0) + return cls(**options) + + +################################# +# Hopf-Oscillator Model Options # +################################# +class HopfOscillatorNodeOptions(NodeOptions): + """ Class to define the properties of HopfOscillator node model """ + + def __init__(self, **kwargs): + """ Initialize """ + model = "hopf_oscillator" + super().__init__( + name=kwargs.pop("name"), + model=model, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=kwargs.pop("state"), + noise=kwargs.pop("noise"), + ) + self._nstates = 2 + self._nparameters = 4 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = HopfOscillatorNodeParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = NodeVisualOptions.from_options( + kwargs["visual"] + ) + options["state"] = HopfOscillatorStateOptions.from_options( + kwargs["state"] + ) + options["noise"] = None + if kwargs["noise"] is not None: + options["noise"] = NoiseOptions.from_options( + kwargs["noise"] + ) + return cls(**options) + + +class HopfOscillatorNodeParameterOptions(NodeParameterOptions): + + def __init__(self, **kwargs): + super().__init__() + self.mu = kwargs.pop("mu") + self.omega = kwargs.pop("omega") + self.alpha = kwargs.pop("alpha") + self.beta = kwargs.pop("beta") + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for HopfOscillator Node model """ + + options = {} + + options["mu"] = kwargs.pop("mu", 0.1) + options["omega"] = kwargs.pop("omega", 0.1) + options["alpha"] = kwargs.pop("alpha", 1.0) + options["beta"] = kwargs.pop("beta", 1.0) + + return cls(**options) + + +class HopfOscillatorStateOptions(NodeStateOptions): + """ HopfOscillator node state options """ + + STATE_NAMES = ["x", "y"] + + def __init__(self, initial): + super().__init__(initial) + + +################################## +# Leaky Integrator Model Options # +################################## +class LeakyIntegratorNodeOptions(NodeOptions): + """ Class to define the properties for standard leaky integrator model """ + + def __init__(self, **kwargs): + """ Initialize """ + model = "leaky_integrator" + super().__init__( + name=kwargs.pop("name"), + model=model, + parameters=kwargs.pop("parameters"), + 
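A hedged sketch coupling two oscillator nodes with the option classes above. The names, weight and phase difference are illustrative, and EdgeTypes.PHASE_COUPLING is assumed to mirror the phase_coupling member of the edge_cy enum.

from farms_network.core.options import (
    EdgeVisualOptions, NodeVisualOptions, OscillatorEdgeOptions,
    OscillatorEdgeParameterOptions, OscillatorNodeOptions,
    OscillatorNodeParameterOptions, OscillatorStateOptions,
)
from farms_network.models import EdgeTypes


def oscillator(name: str) -> OscillatorNodeOptions:
    """Build one oscillator node with default parameters."""
    return OscillatorNodeOptions(
        name=name,
        parameters=OscillatorNodeParameterOptions.defaults(intrinsic_frequency=1.0),
        visual=NodeVisualOptions(label=name),
        state=OscillatorStateOptions(initial=[0.0, 0.0, 1.0]),  # phase, amplitude_0, amplitude
        noise=None,
    )


left, right = oscillator("osc_left"), oscillator("osc_right")
coupling = OscillatorEdgeOptions(
    source=left.name,
    target=right.name,
    weight=10.0,
    type=EdgeTypes.PHASE_COUPLING,  # assumed member name, mirroring the edge_cy enum
    parameters=OscillatorEdgeParameterOptions.defaults(phase_difference=3.14159 / 2),
    visual=EdgeVisualOptions(),
)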
visual=kwargs.pop("visual"), + state=kwargs.pop("state"), + noise=kwargs.pop("noise"), + ) + self._nstates = 1 + self._nparameters = 3 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = LeakyIntegratorParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = NodeVisualOptions.from_options( + kwargs["visual"] + ) + options["state"] = LeakyIntegratorStateOptions.from_options( + kwargs["state"] + ) + options["noise"] = None + if kwargs["noise"] is not None: + options["noise"] = NoiseOptions.from_options( + kwargs["noise"] + ) + return cls(**options) + + +class LeakyIntegratorParameterOptions(NodeParameterOptions): + """ + Class to define the parameters of Leaky Integrator model. + + Attributes: + tau (float): Time constant. + bias (float) + D (float) + """ + + def __init__(self, **kwargs): + super().__init__() + self.tau = kwargs.pop("tau") + self.bias = kwargs.pop("bias") + self.D = kwargs.pop("D") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for LI Danner Node model """ + + options = {} + + options["tau"] = kwargs.pop("tau", 0.1) + options["bias"] = kwargs.pop("bias", -2.75) + options["D"] = kwargs.pop("D", 1.0) + + return cls(**options) + + +class LeakyIntegratorStateOptions(NodeStateOptions): + """ LeakyIntegrator node state options """ + + STATE_NAMES = ["m",] + + def __init__(self, initial): + super().__init__(initial) + + +######################################### +# Leaky Integrator Danner Model Options # +######################################### +class LIDannerNodeOptions(NodeOptions): + """ Class to define the properties of Leaky integrator danner node model """ + + MODEL = Models.LI_DANNER + + def __init__(self, **kwargs): + """ Initialize """ + super().__init__( + name=kwargs.pop("name"), + model=LIDannerNodeOptions.MODEL, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=kwargs.pop("state"), + noise=kwargs.pop("noise"), + ) + self._nstates = 2 + self._nparameters = 10 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = LIDannerNodeParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = kwargs.get("visual", None) + options["state"] = LIDannerStateOptions.from_options( + kwargs["state"] + ) + options["noise"] = None + if kwargs["noise"] is not None: + options["noise"] = NoiseOptions.from_options( + kwargs["noise"] + ) + return cls(**options) + + +class LIDannerNodeParameterOptions(NodeParameterOptions): + """ + Class to define the parameters of Leaky Integrator Danner node model. + + Attributes: + c_m (float): Membrane capacitance (in pF). + g_leak (float): Leak conductance (in nS). + e_leak (float): Leak reversal potential (in mV). + v_max (float): Maximum voltage (in mV). + v_thr (float): Threshold voltage (in mV). + g_syn_e (float): Excitatory synaptic conductance (in nS). + g_syn_i (float): Inhibitory synaptic conductance (in nS). + e_syn_e (float): Excitatory synaptic reversal potential (in mV). + e_syn_i (float): Inhibitory synaptic reversal potential (in mV). 
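# A hypothetical sketch for the leaky-integrator options above: defaults()
# fills tau/bias/D, and individual values can be overridden by keyword. The
# single-element initial state is an assumption based on STATE_NAMES = ["m"].
li_params = LeakyIntegratorParameterOptions.defaults(tau=0.05)  # bias=-2.75, D=1.0 kept
li_state = LeakyIntegratorStateOptions(initial=[0.0])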
+ tau_ch (float): Cholinergic time constant (in mS) + """ + + def __init__(self, **kwargs): + super().__init__() + self.c_m = kwargs.pop("c_m") # pF + self.g_leak = kwargs.pop("g_leak") # nS + self.e_leak = kwargs.pop("e_leak") # mV + self.v_max = kwargs.pop("v_max") # mV + self.v_thr = kwargs.pop("v_thr") # mV + self.g_syn_e = kwargs.pop("g_syn_e") # nS + self.g_syn_i = kwargs.pop("g_syn_i") # nS + self.e_syn_e = kwargs.pop("e_syn_e") # mV + self.e_syn_i = kwargs.pop("e_syn_i") # mV + self.tau_ch = kwargs.pop("tau_ch", 5.0) # tau-cholinergic + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def defaults(cls, **kwargs): + """ Get the default parameters for LI Danner Node model """ + + options = {} + options["c_m"] = kwargs.pop("c_m", 10.0) + options["g_leak"] = kwargs.pop("g_leak", 2.8) + options["e_leak"] = kwargs.pop("e_leak", -60.0) + options["v_max"] = kwargs.pop("v_max", 0.0) + options["v_thr"] = kwargs.pop("v_thr", -50.0) + options["g_syn_e"] = kwargs.pop("g_syn_e", 10.0) + options["g_syn_i"] = kwargs.pop("g_syn_i", 10.0) + options["e_syn_e"] = kwargs.pop("e_syn_e", -10.0) + options["e_syn_i"] = kwargs.pop("e_syn_i", -75.0) + options["tau_ch"] = kwargs.pop("tau_ch", 5.0) + return cls(**options) + + +class LIDannerStateOptions(NodeStateOptions): + """ LI Danner node state options """ + + STATE_NAMES = ["v", "a"] + + def __init__(self, initial): + super().__init__(initial=initial) + + +################################################## +# Leaky Integrator With NaP Danner Model Options # +################################################## +class LINaPDannerNodeOptions(NodeOptions): + """ Class to define the properties of Leaky integrator danner node model """ + + def __init__(self, **kwargs): + """ Initialize """ + model = "li_nap_danner" + super().__init__( + name=kwargs.pop("name"), + model=model, + parameters=kwargs.pop("parameters"), + visual=kwargs.pop("visual"), + state=kwargs.pop("state"), + noise=kwargs.pop("noise"), + ) + self._nstates = 2 + self._nparameters = 19 + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def from_options(cls, kwargs: Dict): + """ Load from options """ + options = {} + options["name"] = kwargs.pop("name") + options["parameters"] = LINaPDannerNodeParameterOptions.from_options( + kwargs["parameters"] + ) + options["visual"] = kwargs.get("visual", None) + options["state"] = LINaPDannerStateOptions.from_options( + kwargs["state"] + ) + options["noise"] = None + if kwargs["noise"] is not None: + options["noise"] = NoiseOptions.from_options( + kwargs["noise"] + ) + return cls(**options) + + +class LINaPDannerNodeParameterOptions(NodeParameterOptions): + """ Class to define the parameters of Leaky integrator danner node model """ + + def __init__(self, **kwargs): + super().__init__() + + self.c_m = kwargs.pop("c_m") # pF + self.g_nap = kwargs.pop("g_nap") # nS + self.e_na = kwargs.pop("e_na") # mV + self.v1_2_m = kwargs.pop("v1_2_m") # mV + self.k_m = kwargs.pop("k_m") # + self.v1_2_h = kwargs.pop("v1_2_h") # mV + self.k_h = kwargs.pop("k_h") # + self.v1_2_t = kwargs.pop("v1_2_t") # mV + self.k_t = kwargs.pop("k_t") # + self.g_leak = kwargs.pop("g_leak") # nS + self.e_leak = kwargs.pop("e_leak") # mV + self.tau_0 = kwargs.pop("tau_0") # mS + self.tau_max = kwargs.pop("tau_max") # mS + self.v_max = kwargs.pop("v_max") # mV + self.v_thr = kwargs.pop("v_thr") # mV + self.g_syn_e = kwargs.pop("g_syn_e") # nS + self.g_syn_i = kwargs.pop("g_syn_i") # nS + self.e_syn_e = kwargs.pop("e_syn_e") # mV + 
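# A hypothetical sketch for the Danner leaky-integrator options above: only
# the overridden keywords change, everything else keeps the defaults() values.
# The two-element initial state (membrane potential "v" in mV, adaptation "a")
# is an assumption based on STATE_NAMES.
danner_params = LIDannerNodeParameterOptions.defaults(e_leak=-62.5)  # mV
danner_state = LIDannerStateOptions(initial=[-60.0, 0.0])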
+        self.e_syn_i = kwargs.pop("e_syn_i")  # mV
+
+    @classmethod
+    def defaults(cls, **kwargs):
+        """ Get the default parameters for LI NaP Danner Node model """
+
+        options = {}
+
+        options["c_m"] = kwargs.pop("c_m", 10.0)  # pF
+        options["g_nap"] = kwargs.pop("g_nap", 4.5)  # nS
+        options["e_na"] = kwargs.pop("e_na", 50.0)  # mV
+        options["v1_2_m"] = kwargs.pop("v1_2_m", -40.0)  # mV
+        options["k_m"] = kwargs.pop("k_m", -6.0)  #
+        options["v1_2_h"] = kwargs.pop("v1_2_h", -45.0)  # mV
+        options["k_h"] = kwargs.pop("k_h", 4.0)  #
+        options["v1_2_t"] = kwargs.pop("v1_2_t", -35.0)  # mV
+        options["k_t"] = kwargs.pop("k_t", 15.0)  #
+        options["g_leak"] = kwargs.pop("g_leak", 4.5)  # nS
+        options["e_leak"] = kwargs.pop("e_leak", -62.5)  # mV
+        options["tau_0"] = kwargs.pop("tau_0", 80.0)  # ms
+        options["tau_max"] = kwargs.pop("tau_max", 160.0)  # ms
+        options["v_max"] = kwargs.pop("v_max", 0.0)  # mV
+        options["v_thr"] = kwargs.pop("v_thr", -50.0)  # mV
+        options["g_syn_e"] = kwargs.pop("g_syn_e", 10.0)
+        options["g_syn_i"] = kwargs.pop("g_syn_i", 10.0)
+        options["e_syn_e"] = kwargs.pop("e_syn_e", -10.0)
+        options["e_syn_i"] = kwargs.pop("e_syn_i", -75.0)
+
+        return cls(**options)
+
+
+class LINaPDannerStateOptions(NodeStateOptions):
+    """ LI NaP Danner node state options """
+
+    STATE_NAMES = ["v", "h"]
+
+    def __init__(self, initial):
+        super().__init__(initial)
+
+
+####################
+# Izhikevich Model #
+####################
+class IzhikevichNodeOptions(NodeOptions):
+    """ Class to define the properties of the Izhikevich node model """
+
+    def __init__(self, **kwargs):
+        """ Initialize """
+        model = "izhikevich"
+        super().__init__(
+            name=kwargs.pop("name"),
+            model=model,
+            parameters=kwargs.pop("parameters"),
+            visual=kwargs.pop("visual"),
+            state=kwargs.pop("state"),
+            noise=kwargs.pop("noise"),
+        )
+        self._nstates = 2
+        self._nparameters = 5
+
+        if kwargs:
+            raise Exception(f'Unknown kwargs: {kwargs}')
+
+
+class IzhikevichParameterOptions(NodeParameterOptions):
+    """ Class to define the parameters of the Izhikevich node model """
+
+    def __init__(self, **kwargs):
+        super().__init__()
+
+        self.recovery_time = kwargs.pop("recovery_time")
+        self.recovery_sensitivity = kwargs.pop("recovery_sensitivity")
+        self.membrane_reset = kwargs.pop("membrane_reset")  # mV
+        self.recovery_reset = kwargs.pop("recovery_reset")
+        self.membrane_threshold = kwargs.pop("membrane_threshold")  # mV
+
+    @classmethod
+    def defaults(cls, **kwargs):
+        """ Get the default parameters for the Izhikevich node model """
+
+        options = {}
+
+        options["recovery_time"] = kwargs.pop("recovery_time", 0.02)
+        options["recovery_sensitivity"] = kwargs.pop("recovery_sensitivity", 0.2)
+        options["membrane_reset"] = kwargs.pop("membrane_reset", -65.0)  # mV
+        options["recovery_reset"] = kwargs.pop("recovery_reset", 2)
+        options["membrane_threshold"] = kwargs.pop("membrane_threshold", 30.0)  # mV
+
+        return cls(**options)
+
+
+class IzhikevichStateOptions(NodeStateOptions):
+    """ Izhikevich node state options """
+
+    STATE_NAMES = ["v", "u"]
+
+    def __init__(self, initial):
+        super().__init__(initial)
+
+
+##############################
+# Network Base Class Options #
+##############################
+class NetworkOptions(Options):
+    """ Base class for neural network options """
+
+    NODE_TYPES: Dict[Models, Type] = {
+        Models.RELAY: RelayNodeOptions,
+        Models.LINEAR: LinearNodeOptions,
+        Models.RELU: ReLUNodeOptions,
+        Models.OSCILLATOR: OscillatorNodeOptions,
+        # Models.HOPF_OSCILLATOR:
HopfOscillatorNodeOptions, + # Models.MORPHED_OSCILLATOR: MorphedOscillatorNodeOptions, + # Models.MATSUOKA: MatsuokaNodeOptions, + # Models.FITZHUGH_NAGUMO: FitzhughNagumoNodeOptions, + # Models.MORRIS_LECAR: MorrisLecarNodeOptions, + # Models.LEAKY_INTEGRATOR: LeakyIntegratorNodeOptions, + Models.LI_DANNER: LIDannerNodeOptions, + Models.LI_NAP_DANNER: LINaPDannerNodeOptions, + # Models.LI_DAUN: LIDaunNodeOptions, + # Models.HH_DAUN: HHDaunNodeOptions, + } + + EDGE_TYPES: Dict[Models, Type] = { + Models.BASE: EdgeOptions, + Models.OSCILLATOR: OscillatorEdgeOptions, + } + + def __init__(self, **kwargs): + super().__init__() + + # Default properties to make it compatible with networkx + # seed + self.directed: bool = kwargs.pop("directed", True) + self.multigraph: bool = kwargs.pop("multigraph", False) + self.graph: dict = kwargs.pop("graph", {"name": ""}) + self.units = kwargs.pop("units", None) + self.logs: NetworkLogOptions = kwargs.pop("logs") + self.random_seed: int = kwargs.pop("random_seed", time.time_ns()) + + self.integration = kwargs.pop( + "integration", IntegrationOptions.defaults() + ) + + self.nodes: List[NodeOptions] = kwargs.pop("nodes", []) + self.edges: List[EdgeOptions] = kwargs.pop("edges", []) + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @classmethod + def load(cls, file_path: str): + """ Load from file """ + opts = Options.load(file_path) + return NetworkOptions.from_options(opts) + + @classmethod + def from_options(cls, kwargs): + """ From options """ + options = {} + options["directed"] = kwargs["directed"] + options["multigraph"] = kwargs["multigraph"] + options["graph"] = kwargs["graph"] + options["units"] = kwargs["units"] + # Log options + options["logs"] = NetworkLogOptions.from_options(kwargs["logs"]) + # Integration options + options["integration"] = IntegrationOptions.from_options(kwargs["integration"]) + # Nodes + options["nodes"] = [ + cls.NODE_TYPES[node["model"]].from_options(node) + for node in kwargs["nodes"] + ] + # Edges + options["edges"] = [ + cls.EDGE_TYPES[edge["model"]].from_options(edge) + for edge in kwargs["edges"] + ] + return cls(**options) + + def add_node(self, options: NodeOptions): + """ Add a node if it does not already exist in the list """ + assert isinstance(options, NodeOptions), f"{type(options)} not an instance of NodeOptions" + if options not in self.nodes: + self.nodes.append(options) + else: + pylog.warning(f"Node {options.name} already exists and will not be added again.") + + def add_nodes(self, options: Iterable[NodeOptions]): + """ Add a collection of nodes """ + for node in options: + self.add_node(node) + + def add_edge(self, options: EdgeOptions): + """ Add a node if it does not already exist in the list """ + if (options.source in self.nodes) and (options.target in self.nodes): + self.edges.append(options) + else: + missing_nodes = [ + "" if (options.source in self.nodes) else options.source, + "" if (options.target in self.nodes) else options.target, + ] + pylog.debug(f"Missing node {*missing_nodes,} in Edge {options}") + + def add_edges(self, options: Iterable[EdgeOptions]): + """ Add a collection of edges """ + for edge in options: + self.add_edge(edge) + + def __add__(self, other: Self): + """ Combine two network options """ + assert isinstance(other, NetworkOptions) + for node in other.nodes: + self.add_node(node) + for edge in other.edges: + self.add_edge(edge) + return self + + def get_node(self, name: str): + """ Get node options from name """ + for node in self.nodes: + if name == 
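# A hypothetical sketch of assembling a NetworkOptions container with the API
# above. NetworkLogOptions and IntegrationOptions are defined further down in
# this file; node and edge construction is sketched in the earlier examples.
network_options = NetworkOptions(
    directed=True,
    logs=NetworkLogOptions(buffer_size=1000),  # "logs" has no default and must be provided
    integration=IntegrationOptions.defaults(timestep=1e-3, n_iterations=10_000),
)
# network_options.add_nodes(my_nodes)  # NodeOptions instances; duplicates are skipped with a warning
# network_options.add_edges(my_edges)  # edges whose source/target nodes are missing are dropped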
node.name:
+                return node
+        raise KeyError(f"Node {name} not found!")
+
+    def get_edge(self, source: str, target: str):
+        """ Get edge options for target and source node names """
+        for edge in self.edges:
+            if (source == edge.source) and (target == edge.target):
+                return edge
+        raise KeyError(f"No edge between source node {source} and target node {target} found!")
+
+
+#################################
+# Numerical Integration Options #
+#################################
+class IntegrationOptions(Options):
+    """ Class to set the options for numerical integration """
+
+    def __init__(self, **kwargs):
+        super().__init__()
+
+        self.timestep: float = kwargs.pop("timestep")
+        self.n_iterations: int = int(kwargs.pop("n_iterations"))
+        self.integrator: str = kwargs.pop("integrator")
+        self.method: str = kwargs.pop("method")
+        self.atol: float = kwargs.pop("atol")
+        self.rtol: float = kwargs.pop("rtol")
+        self.max_step: float = kwargs.pop("max_step")
+        self.checks: bool = kwargs.pop("checks")
+
+        if kwargs:
+            raise Exception(f'Unknown kwargs: {kwargs}')
+
+    @classmethod
+    def defaults(cls, **kwargs):
+        """ Get the default integration options """
+
+        options = {}
+
+        options["timestep"] = kwargs.pop("timestep", 1e-3)
+        options["n_iterations"] = int(kwargs.pop("n_iterations", 1e3))
+        options["integrator"] = kwargs.pop("integrator", "rk4")
+        options["method"] = kwargs.pop("method", "adams")
+        options["atol"] = kwargs.pop("atol", 1e-12)
+        options["rtol"] = kwargs.pop("rtol", 1e-6)
+        options["max_step"] = kwargs.pop("max_step", 0.0)
+        options["checks"] = kwargs.pop("checks", True)
+        return cls(**options)
+
+    @classmethod
+    def from_options(cls, kwargs: Dict):
+        """ From options """
+        return cls(**kwargs)
+
+
+###################
+# Logging Options #
+###################
+class NetworkLogOptions(Options):
+    """ Log options for the network level
+
+    Configure logging for network events and iterations.
+
+    Attributes:
+        enable (bool): Whether logging is enabled. Defaults to True.
+        buffer_size (int): Size of the log buffer. A value of 0 defaults to
+            the number of iterations; a negative value disables logging.
+        nodes_all (bool): Whether to log all nodes or only selected ones. Defaults to False.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.enable: bool = kwargs.pop('enable', True)
+        self.buffer_size: int = kwargs.pop('buffer_size')
+        if self.buffer_size < 0:
+            pylog.debug("Logging is disabled because buffer size is negative")
+        assert isinstance(self.buffer_size, int), "buffer_size should be an integer"
+        self.nodes_all: bool = kwargs.pop("nodes_all", False)
+
+        if kwargs:
+            raise Exception(f'Unknown kwargs: {kwargs}')
+
+    @classmethod
+    def from_options(cls, kwargs: Dict):
+        """ From options """
+        return cls(**kwargs)
diff --git a/farms_network/data/data.py b/farms_network/data/data.py
deleted file mode 100644
index c3a5eee..0000000
--- a/farms_network/data/data.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
------------------------------------------------------------------------
-Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty
-Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
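# A hypothetical round-trip sketch: NetworkOptions.load() reads a serialized
# options dictionary and from_options() rebuilds the typed classes, dispatching
# each node/edge on its "model" field through NODE_TYPES / EDGE_TYPES. The file
# name and on-disk format are assumptions (Options.load() is inherited from the
# base Options class, which is not part of this excerpt).
# network_options = NetworkOptions.load("network_options.yaml")
# assert isinstance(network_options.integration, IntegrationOptions)
# Logging can be switched off by passing a negative buffer size:
silent_logs = NetworkLogOptions(buffer_size=-1)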
-See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Main data structure for the network - -""" - -from .data_cy import NetworkDataCy, NeuronDataCy, NeuronsDataCy - - -class NetworkData(NetworkDataCy): - """ Network data """ - - def __init__(self): - """Network data structure""" - - super().__init__() - - self.neurons = None - self.connectivity = None - self.states = None - self.inputs = None - self.outputs = None - - -class NeuronsData(NeuronsDataCy): - """ Neuronal data """ - - def __init__(self): - """ Neurons data """ - - super().__init__() - - - -class NeuronData(NeuronDataCy): - """ Base class for representing an arbitrary neuron data """ - - def __init__(self): - """Neuron data initialization """ - - super().__init__() - - self.consts = None - self.variables = None diff --git a/farms_network/data/data_cy.pxd b/farms_network/data/data_cy.pxd deleted file mode 100644 index c928731..0000000 --- a/farms_network/data/data_cy.pxd +++ /dev/null @@ -1,56 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ -""" - - -################################## -########## Network data ########## -################################## - -cdef class NetworkDataCy: - """ Network data """ - - def __init__(self): - """ network data initialization """ - - super().__init__() - ... - - -################################## -########## Neurons Data ########## -################################## - -cdef class NeuronsDataCy: - """ Neurons data """ - - def __init__(self): - """ neurons data initialization """ - - super().__init__() - ... - - -cdef class NeuronDataCy: - """ Neuron data """ - - def __init__(self): - """ neurons data initialization """ - - super().__init__() - ... diff --git a/farms_network/fitzhugh_nagumo.pxd b/farms_network/fitzhugh_nagumo.pxd deleted file mode 100644 index 6a73c04..0000000 --- a/farms_network/fitzhugh_nagumo.pxd +++ /dev/null @@ -1,66 +0,0 @@ -""" ----------------------------------------------------------------------- -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
------------------------------------------------------------------------ - -Fitzhugh Nagumo model. - -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct FNNeuronInput: - int neuron_idx - int weight_idx - int phi_idx - -cdef class FitzhughNagumo(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double a - double b - double tau - double internal_curr - - # states - Parameter V - Parameter w - - # inputs - Parameter ext_in - - # ode - Parameter V_dot - Parameter w_dot - - # Ouputs - Parameter nout - - # neuron connenctions - FNNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w) diff --git a/farms_network/fitzhugh_nagumo.pyx b/farms_network/fitzhugh_nagumo.pyx deleted file mode 100644 index 0395af5..0000000 --- a/farms_network/fitzhugh_nagumo.pyx +++ /dev/null @@ -1,161 +0,0 @@ -""" ----------------------------------------------------------------------- -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Fitzhugh Nagumo model - -""" -from libc.stdio cimport printf -import farms_pylog as pylog -import numpy as np -cimport numpy as cnp - - -cdef class FitzhughNagumo(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super(FitzhughNagumo, self).__init__('leaky') - - # Neuron ID - self.n_id = n_id - - # Initialize parameters - (_, self.a) = neural_container.constants.add_parameter( - 'a_' + self.n_id, kwargs.get('a', 0.7)) - - (_, self.b) = neural_container.constants.add_parameter( - 'b_' + self.n_id, kwargs.get('b', 0.8)) - - (_, self.tau) = neural_container.constants.add_parameter( - 'tau_' + self.n_id, kwargs.get('tau', 1/0.08)) - - (_, self.internal_curr) = neural_container.constants.add_parameter( - 'I_' + self.n_id, kwargs.get('I', 1)) - - # Initialize states - self.V = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('V0', 0.0))[0] - self.w = neural_container.states.add_parameter( - 'w_' + self.n_id, kwargs.get('w0', 0.0))[0] - - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.V_dot = neural_container.dstates.add_parameter( - 'V_dot_' + self.n_id, 0.0)[0] - self.w_dot = neural_container.dstates.add_parameter( - 'w_dot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i'), - ('phi_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef FNNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('weight', 0.0))[0] - phi = neural_container.parameters.add_parameter( - 'phi_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('phi', 0.0))[0] - - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - phi_idx = neural_container.parameters.get_parameter_index( - 'phi_' + neuron.n_id + '_to_' + self.n_id) - - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - n.phi_idx = phi_idx - cdef double x = self.a - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # Current state - cdef double _V = self.V.c_get_value() - cdef double _W = self.w.c_get_value() - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - cdef double _phi - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _phi = _p[self.neuron_inputs[j].phi_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, - _weight, _phi, _V, _W) - - # phidot : V_dot - self.V_dot.c_set_value(_V - _V**3/3 - _W + self.internal_curr + _sum) - - # wdot - self.w_dot.c_set_value((1/self.tau)*(_V + self.a - self.b*_W)) - - cdef void c_output(self): - """ Neuron output. 
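# For reference, a plain-Python restatement of the FitzHugh-Nagumo right-hand
# side removed above, with the external input and the weighted coupling term
# folded into i_ext; parameter defaults follow the deleted Cython class.
def fitzhugh_nagumo_rhs(v, w, a=0.7, b=0.8, tau=1 / 0.08, i_ext=1.0):
    """Return (dV/dt, dw/dt) for the FitzHugh-Nagumo model."""
    v_dot = v - v**3 / 3 - w + i_ext
    w_dot = (v + a - b * w) / tau
    return v_dot, w_dot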
""" - self.nout.c_set_value(self.V.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w): - """ Evaluate neuron inputs.""" - return _weight*(_neuron_out - _V - _phi) diff --git a/farms_network/hh_daun_motorneuron.pxd b/farms_network/hh_daun_motorneuron.pxd deleted file mode 100644 index 0c0519c..0000000 --- a/farms_network/hh_daun_motorneuron.pxd +++ /dev/null @@ -1,113 +0,0 @@ -""" ----------------------------------------------------------------------- -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Hodgkin Huxley Motor Neuron Based on Daun et.al. -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct DaunMotorNeuronInput: - int neuron_idx - int g_syn_idx - int e_syn_idx - int gamma_s_idx - int v_h_s_idx - -cdef class HHDaunMotorneuron(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double g_nap - double e_nap - double am1_nap - double am2_nap - double am3_nap - double bm1_nap - double bm2_nap - double bm3_nap - double ah1_nap - double ah2_nap - double ah3_nap - double bh1_nap - double bh2_nap - double bh3_nap - - # Parameters of IK - double g_k - double e_k - double am1_k - double am2_k - double am3_k - double bm1_k - double bm2_k - double bm3_k - - # Parameters of Iq - double g_q - double e_q - double gamma_q - double r_q - double v_m_q - - # Parameters of Ileak - double g_leak - double e_leak - - # Parameters of Isyn - double g_syn - double e_syn - double v_hs - double gamma_s - - # Other constants - double c_m - - # State Variables - Parameter v - Parameter m_na - Parameter h_na - Parameter m_k - Parameter m_q - - # ODE - Parameter vdot - Parameter m_na_dot - Parameter h_na_dot - Parameter m_k_dot - Parameter m_q_dot - - # External Input - Parameter g_app - Parameter e_app - - # Output - Parameter nout - - # neuron connenctions - DaunMotorNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval(self, double _neuron_out, double _g_syn, double _e_syn, - double _gamma_s, double _v_h_s) diff --git a/farms_network/hh_daun_motorneuron.pyx b/farms_network/hh_daun_motorneuron.pyx deleted file mode 100644 index be2a19d..0000000 --- a/farms_network/hh_daun_motorneuron.pyx +++ /dev/null @@ -1,334 +0,0 @@ -""" ----------------------------------------------------------------------- -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -HH-Daun Motor neuron -""" -from libc.stdio cimport printf -import numpy as np -from libc.math cimport exp as cexp -from libc.math cimport cosh as ccosh -from libc.math cimport fabs as cfabs -cimport numpy as cnp - - -cdef class HHDaunMotorneuron(Neuron): - """Hodgkin Huxley Neuron Model - Based on Silvia Daun and Tbor's model. - """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - super(HHDaunMotorneuron, self).__init__('hh_daun_motorneuron') - - self.n_id = n_id # Unique neuron identifier - # Constants - # Neuron constants - # Parameters of INaP - (_, self.g_nap) = neural_container.constants.add_parameter( - 'g_nap' + self.n_id, kwargs.get('g_nap', 10.0)) - (_, self.e_nap) = neural_container.constants.add_parameter( - 'e_nap' + self.n_id, kwargs.get('e_nap', 55.0)) - (_, self.am1_nap) = neural_container.constants.add_parameter( - 'am1_nap' + self.n_id, kwargs.get('am1_nap', 0.32)) - (_, self.am2_nap) = neural_container.constants.add_parameter( - 'am2_nap' + self.n_id, kwargs.get('am2_nap', -51.90)) - (_, self.am3_nap) = neural_container.constants.add_parameter( - 'am3_nap' + self.n_id, kwargs.get('am3_nap', 0.25)) - (_, self.bm1_nap) = neural_container.constants.add_parameter( - 'bm1_nap' + self.n_id, kwargs.get('bm1_nap', -0.280)) - (_, self.bm2_nap) = neural_container.constants.add_parameter( - 'bm2_nap' + self.n_id, kwargs.get('bm2_nap', -24.90)) - (_, self.bm3_nap) = neural_container.constants.add_parameter( - 'bm3_nap' + self.n_id, kwargs.get('bm3_nap', -0.2)) - (_, self.ah1_nap) = neural_container.constants.add_parameter( - 'ah1_nap' + self.n_id, kwargs.get('ah1_nap', 0.1280)) - (_, self.ah2_nap) = neural_container.constants.add_parameter( - 'ah2_nap' + self.n_id, kwargs.get('ah2_nap', -48.0)) - (_, self.ah3_nap) = neural_container.constants.add_parameter( - 'ah3_nap' + self.n_id, kwargs.get('ah3_nap', 0.0556)) - (_, self.bh1_nap) = neural_container.constants.add_parameter( - 'bh1_nap' + self.n_id, kwargs.get('bh1_nap', 4.0)) - (_, self.bh2_nap) = neural_container.constants.add_parameter( - 'bh2_nap' + self.n_id, kwargs.get('bh2_nap', -25.0)) - (_, self.bh3_nap) = neural_container.constants.add_parameter( - 'bh3_nap' + self.n_id, kwargs.get('bh3_nap', 0.20)) - - # Parameters of IK - (_, self.g_k) = neural_container.constants.add_parameter( - 'g_k' + self.n_id, kwargs.get('g_k', 2.0)) - (_, self.e_k) = neural_container.constants.add_parameter( - 'e_k' + self.n_id, kwargs.get('e_k', -80.0)) - (_, self.am1_k) = neural_container.constants.add_parameter( - 'am1_k' + self.n_id, kwargs.get('am1_k', 0.0160)) - (_, self.am2_k) = neural_container.constants.add_parameter( - 'am2_k' + self.n_id, kwargs.get('am2_k', -29.90)) - (_, self.am3_k) = neural_container.constants.add_parameter( - 'am3_k' + self.n_id, kwargs.get('am3_k', 0.20)) - (_, self.bm1_k) = neural_container.constants.add_parameter( - 'bm1_k' + self.n_id, kwargs.get('bm1_k', 0.250)) - (_, self.bm2_k) = neural_container.constants.add_parameter( - 'bm2_k' + self.n_id, kwargs.get('bm2_k', -45.0)) - (_, self.bm3_k) = 
neural_container.constants.add_parameter( - 'bm3_k' + self.n_id, kwargs.get('bm3_k', 0.025)) - - # Parameters of Iq - (_, self.g_q) = neural_container.constants.add_parameter( - 'g_q' + self.n_id, kwargs.get('g_q', 12.0)) - (_, self.e_q) = neural_container.constants.add_parameter( - 'e_q' + self.n_id, kwargs.get('e_q', -80.0)) - (_, self.gamma_q) = neural_container.constants.add_parameter( - 'gamma_q' + self.n_id, kwargs.get('gamma_q', -0.6)) - (_, self.r_q) = neural_container.constants.add_parameter( - 'r_q' + self.n_id, kwargs.get('r_q', 0.0005)) - (_, self.v_m_q) = neural_container.constants.add_parameter( - 'v_m_q' + self.n_id, kwargs.get('v_m_q', -30.0)) - - # Parameters of Ileak - (_, self.g_leak) = neural_container.constants.add_parameter( - 'g_leak' + self.n_id, kwargs.get('g_leak', 0.8)) - (_, self.e_leak) = neural_container.constants.add_parameter( - 'e_leak' + self.n_id, kwargs.get('e_leak', -70.0)) - - # Parameters of Isyn - (_, self.g_syn) = neural_container.constants.add_parameter( - 'g_syn' + self.n_id, kwargs.get('g_syn', 0.1)) - (_, self.e_syn) = neural_container.constants.add_parameter( - 'e_syn' + self.n_id, kwargs.get('e_syn', 0.0)) - (_, self.v_hs) = neural_container.constants.add_parameter( - 'v_hs' + self.n_id, kwargs.get('v_hs', -43.0)) - (_, self.gamma_s) = neural_container.constants.add_parameter( - 'gamma_s' + self.n_id, kwargs.get('gamma_s', -0.42)) - - # Other constants - (_, self.c_m) = neural_container.constants.add_parameter( - 'c_m' + self.n_id, kwargs.get('c_m', 1.0)) - - # State Variables - # pylint: disable=invalid-name - # Membrane potential - self.v = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('v0', -65.0))[0] - self.m_na = neural_container.states.add_parameter( - 'm_na_' + self.n_id, kwargs.get('m_na0', 0.9))[0] - self.h_na = neural_container.states.add_parameter( - 'h_na_' + self.n_id, kwargs.get('h_na0', 0.0))[0] - self.m_k = neural_container.states.add_parameter( - 'm_k_' + self.n_id, kwargs.get('m_k0', 0.0))[0] - self.m_q = neural_container.states.add_parameter( - 'm_q_' + self.n_id, kwargs.get('m_q0', 0.0))[0] - - # ODE - self.vdot = neural_container.dstates.add_parameter( - 'vdot_' + self.n_id, 0.0)[0] - self.m_na_dot = neural_container.dstates.add_parameter( - 'm_na_dot_' + self.n_id, 0.0)[0] - self.h_na_dot = neural_container.dstates.add_parameter( - 'h_na_dot_' + self.n_id, 0.0)[0] - self.m_k_dot = neural_container.dstates.add_parameter( - 'm_k_dot_' + self.n_id, 0.0)[0] - self.m_q_dot = neural_container.dstates.add_parameter( - 'm_q_dot_' + self.n_id, 0.0)[0] - - # External Input - self.g_app = neural_container.inputs.add_parameter( - 'g_app_' + self.n_id, kwargs.get('g_app', 0.19))[0] - self.e_app = neural_container.inputs.add_parameter( - 'e_app_' + self.n_id, kwargs.get('e_app', 0.0))[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.num_inputs = num_inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('g_syn_idx', 'i'), - ('e_syn_idx', 'i'), - ('gamma_s_idx', 'i'), - ('v_h_s_idx', 'i')]) - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode. - Parameters - ---------- - neuron : - Neuron model from which the input is received. 
- weight : - Strength of the synapse between the two neurons""" - - # Create a struct to store the inputs and weights to the neuron - cdef DaunMotorNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - g_syn = neural_container.parameters.add_parameter( - 'g_syn_' + self.n_id, kwargs.pop('g_syn', 0.0))[0] - e_syn = neural_container.parameters.add_parameter( - 'e_syn_' + self.n_id, kwargs.pop('e_syn', 0.0))[0] - gamma_s = neural_container.parameters.add_parameter( - 'gamma_s_' + self.n_id, kwargs.pop('gamma_s', 0.0))[0] - v_h_s = neural_container.parameters.add_parameter( - 'v_h_s_' + self.n_id, kwargs.pop('v_h_s', 0.0))[0] - - # Get neuron parameter indices - g_syn_idx = neural_container.parameters.get_parameter_index( - 'g_syn_' + self.n_id) - e_syn_idx = neural_container.parameters.get_parameter_index( - 'e_syn_' + self.n_id) - gamma_s_idx = neural_container.parameters.get_parameter_index( - 'gamma_s_' + self.n_id) - v_h_s_idx = neural_container.parameters.get_parameter_index( - 'v_h_s_' + self.n_id) - - # Add the indices to the struct - n.neuron_idx = neuron_idx - n.g_syn_idx = g_syn_idx - n.e_syn_idx = e_syn_idx - n.gamma_s_idx = gamma_s_idx - n.v_h_s_idx = v_h_s_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # States - cdef double _v = self.v.c_get_value() - cdef double _m_na = self.m_na.c_get_value() - cdef double _h_na = self.h_na.c_get_value() - cdef double _m_k = self.m_k.c_get_value() - cdef double _m_q = self.m_q.c_get_value() - - # alpha_m_Na(V) - cdef double a_m_nap = (self.am1_nap * (self.am2_nap - _v)) / ( - cexp(self.am3_nap * (self.am2_nap - _v)) - 1) - - # beta_m_Na(V) - cdef double b_m_nap = (self.bm1_nap * (self.bm2_nap - _v)) / ( - cexp(self.bm3_nap * (self.bm2_nap - _v)) - 1) - - # alpha_m_Na(V) - cdef double a_h_nap = self.ah1_nap * cexp( - self.ah3_nap * (self.ah2_nap - _v)) - - # beta_m_Na(V) - cdef double b_h_nap = (self.bh1_nap) / ( - cexp(self.bh3_nap * (self.bh2_nap - _v)) + 1) - - # Inap - # pylint: disable=no-member - cdef double i_nap = self.g_nap * _m_na * _h_na * ( - _v - self.e_nap) - - # alpha_m_K - cdef double a_m_k = (self.am1_k * (self.am2_k - _v)) / ( - cexp(self.am3_k * (self.am2_k - _v)) - 1) - - # beta_m_K - cdef double b_m_k = self.bm1_k * cexp(self.bm3_k * (self.bm2_k - _v)) - - # Ik - # pylint: disable=no-member - cdef double i_k = self.g_k * _m_k * (_v - self.e_k) - - # m_q_inf - cdef double m_q_inf = 1./(1 + cexp(self.gamma_q * (_v - self.v_m_q))) - - # alpha_m_q - cdef double a_m_q = m_q_inf * self.r_q - - # beta_m_q - cdef double b_m_q = (1 - m_q_inf) * self.r_q - - # Ileak - cdef double i_leak = self.g_leak * (_v - self.e_leak) - - # Iapp - cdef double i_app = self.g_app.c_get_value() * ( - _v - self.e_app.c_get_value()) - - # m_na_dot - self.m_na_dot.c_set_value(a_m_nap*(1 - _m_na) - b_m_nap*_m_na) - - # h_na_dot - self.h_na_dot.c_set_value(a_h_nap*(1 - _h_na) - b_h_nap*_h_na) - - # m_k_dot - self.m_k_dot.c_set_value(a_m_k*(1 - _m_k) - b_m_k*_m_k) - - # m_q_dot - self.m_q_dot.c_set_value(a_m_q 
* (1 - _m_q) - b_m_q * _m_q) - - # Iq - # pylint: disable=no-member - cdef double i_q = self.g_q * self.m_q_dot.c_get_value() * (_v - self.e_q) - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _g_syn - cdef double _e_syn - cdef double _gamma_s - cdef double _v_h_s - cdef DaunMotorNeuronInput _neuron - - for j in range(self.num_inputs): - _neuron = self.neuron_inputs[j] - _neuron_out = _y[_neuron.neuron_idx] - _g_syn = _p[_neuron.g_syn_idx] - _e_syn = _p[_neuron.e_syn_idx] - _gamma_s = _p[_neuron.gamma_s_idx] - _v_h_s = _p[_neuron.v_h_s_idx] - _sum += self.c_neuron_inputs_eval( - _neuron_out, _g_syn, _e_syn, _gamma_s, _v_h_s) - - # dV - self.vdot.c_set_value(( - -i_nap - i_k - i_q - i_leak - i_app - _sum)/self.c_m) - - cdef void c_output(self): - """ Neuron output. """ - # Set the neuron output - self.nout.c_set_value(self.v.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _g_syn, double _e_syn, - double _gamma_s, double _v_h_s): - """ Evaluate neuron inputs.""" - cdef double _v = self.v.c_get_value() - - cdef double _s_inf = 1./(1. + cexp(_gamma_s*(_neuron_out - _v_h_s))) - - return _g_syn*_s_inf*(_v - _e_syn) diff --git a/farms_network/hopf_oscillator.pxd b/farms_network/hopf_oscillator.pxd deleted file mode 100644 index 96e9fa6..0000000 --- a/farms_network/hopf_oscillator.pxd +++ /dev/null @@ -1,67 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Hopf oscillator model - -[1]L. Righetti and A. J. Ijspeert, “Pattern generators with sensory -feedback for the control of quadruped locomotion,” in 2008 IEEE -International Conference on Robotics and Automation, May 2008, -pp. 819–824. doi: 10.1109/ROBOT.2008.4543306. 
-""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct HopfOscillatorNeuronInput: - int neuron_idx - int weight_idx - -cdef class HopfOscillator(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double mu - double omega - double alpha - double beta - - # states - Parameter x - Parameter y - - # inputs - Parameter ext_in - - # ode - Parameter xdot - Parameter ydot - - # Ouputs - Parameter nout - - # neuron connenctions - HopfOscillatorNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval(self, double _neuron_out, double _weight) diff --git a/farms_network/hopf_oscillator.pyx b/farms_network/hopf_oscillator.pyx deleted file mode 100644 index d21ae52..0000000 --- a/farms_network/hopf_oscillator.pyx +++ /dev/null @@ -1,146 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Hopf Oscillator - -[1]L. Righetti and A. J. Ijspeert, “Pattern generators with sensory -feedback for the control of quadruped locomotion,” in 2008 IEEE -International Conference on Robotics and Automation, May 2008, -pp. 819–824. doi: 10.1109/ROBOT.2008.4543306. - -""" -from libc.math cimport exp -import numpy as np -cimport numpy as cnp - -cdef class HopfOscillator(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super(HopfOscillator, self).__init__('leaky', n_id) - # Neuron ID - self.n_id = n_id - # Initialize parameters - (_, self.mu) = neural_container.constants.add_parameter( - 'mu_' + self.n_id, kwargs.get('mu', 0.1)) - (_, self.omega) = neural_container.constants.add_parameter( - 'omega_' + self.n_id, kwargs.get('omega', 0.1)) - (_, self.alpha) = neural_container.constants.add_parameter( - 'alpha_' + self.n_id, kwargs.get('alpha', 1.0)) - (_, self.beta) = neural_container.constants.add_parameter( - 'beta_' + self.n_id, kwargs.get('beta', 1.0)) - - # Initialize states - self.x = neural_container.states.add_parameter( - 'x_' + self.n_id, kwargs.get('x0', 0.0))[0] - self.y = neural_container.states.add_parameter( - 'y_' + self.n_id, kwargs.get('y0', 0.0))[0] - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.xdot = neural_container.dstates.add_parameter( - 'xdot_' + self.n_id, 0.0)[0] - self.ydot = neural_container.dstates.add_parameter( - 'ydot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef HopfOscillatorNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, kwargs.get('weight', 0.0)) - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, _weight) - - # sates - cdef double x = self.x.c_get_value() - cdef double y = self.y.c_get_value() - cdef double mu = self.mu - cdef double omega = self.omega - self.xdot.c_set_value( - self.alpha*(self.mu - (x**2 + y**2))*x - self.omega*y - ) - self.ydot.c_set_value( - self.beta*(self.mu - (x**2 + y**2))*y + self.omega*x + ( - self.ext_in.c_get_value() + _sum - ) - ) - - cdef void c_output(self): - """ Neuron output. 
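# For reference, a plain-Python restatement of the Hopf oscillator right-hand
# side removed above (Righetti & Ijspeert 2008), with the external input and
# the weighted inputs folded into u; defaults follow the deleted Cython class.
def hopf_oscillator_rhs(x, y, mu=0.1, omega=0.1, alpha=1.0, beta=1.0, u=0.0):
    """Return (dx/dt, dy/dt) for the Hopf oscillator."""
    r2 = x**2 + y**2
    x_dot = alpha * (mu - r2) * x - omega * y
    y_dot = beta * (mu - r2) * y + omega * x + u
    return x_dot, y_dot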
""" - self.nout.c_set_value(self.y.c_get_value()) - - cdef double c_neuron_inputs_eval(self, double _neuron_out, double _weight): - """ Evaluate neuron inputs.""" - return _neuron_out*_weight diff --git a/farms_network/integrators.pxd b/farms_network/integrators.pxd deleted file mode 100644 index bde90f0..0000000 --- a/farms_network/integrators.pxd +++ /dev/null @@ -1,4 +0,0 @@ -cimport numpy as cnp - - -cpdef cnp.ndarray c_rk4(double time, cnp.ndarray[double] state, func, double step_size) diff --git a/farms_network/integrators.pyx b/farms_network/integrators.pyx deleted file mode 100644 index 975a3e5..0000000 --- a/farms_network/integrators.pyx +++ /dev/null @@ -1,11 +0,0 @@ -import numpy as np - - -cpdef cnp.ndarray c_rk4(double time, cnp.ndarray[double, ndim=1] state, func, double step_size): - """ Runge-kutta order 4 integrator """ - cdef cnp.ndarray[double, ndim=1] K1 = np.asarray(func(time, state)) - cdef cnp.ndarray[double, ndim=1] K2 = np.asarray(func(time + step_size/2, state + (step_size/2 * K1))) - cdef cnp.ndarray[double, ndim=1] K3 = np.asarray(func(time + step_size/2, state + (step_size/2 * K2))) - cdef cnp.ndarray[double, ndim=1] K4 = np.asarray(func(time + step_size, state + (step_size * K3))) - cdef cnp.ndarray[double, ndim=1] new_state = state + (K1 + 2*K2 + 2*K3 + K4)*(step_size/6) - return new_state diff --git a/farms_network/leaky_integrator.pxd b/farms_network/leaky_integrator.pxd deleted file mode 100644 index 7837b49..0000000 --- a/farms_network/leaky_integrator.pxd +++ /dev/null @@ -1,59 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrator Neuron. 
-""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct LeakyIntegratorNeuronInput: - int neuron_idx - int weight_idx - -cdef class LeakyIntegrator(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double tau - double bias - double D - - # states - Parameter m - - # inputs - Parameter ext_in - - # ode - Parameter mdot - - # Ouputs - Parameter nout - - # neuron connenctions - LeakyIntegratorNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval(self, double _neuron_out, double _weight) diff --git a/farms_network/leaky_integrator.pyx b/farms_network/leaky_integrator.pyx deleted file mode 100644 index 78ade81..0000000 --- a/farms_network/leaky_integrator.pyx +++ /dev/null @@ -1,126 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrator Neuron. -""" -from libc.math cimport exp -import numpy as np -cimport numpy as cnp - -cdef class LeakyIntegrator(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
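# For reference, the c_rk4() integrator removed above (integrators.pyx)
# expressed in plain NumPy; func(time, state) returns the state derivative.
import numpy as np

def rk4_step(time, state, func, step_size):
    """One classical Runge-Kutta 4 step, mirroring the removed Cython version."""
    k1 = np.asarray(func(time, state))
    k2 = np.asarray(func(time + step_size / 2, state + step_size / 2 * k1))
    k3 = np.asarray(func(time + step_size / 2, state + step_size / 2 * k2))
    k4 = np.asarray(func(time + step_size, state + step_size * k3))
    return state + (k1 + 2 * k2 + 2 * k3 + k4) * (step_size / 6)

# Example usage with the Hopf oscillator sketch above (assuming it is in scope):
# state = rk4_step(0.0, np.array([0.1, 0.0]),
#                  lambda t, s: np.array(hopf_oscillator_rhs(*s)), 1e-3)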
- """ - super(LeakyIntegrator, self).__init__('leaky') - # Neuron ID - self.n_id = n_id - # Initialize parameters - (_, self.tau) = neural_container.constants.add_parameter( - 'tau_' + self.n_id, kwargs.get('tau', 0.1)) - (_, self.bias) = neural_container.constants.add_parameter( - 'bias_' + self.n_id, kwargs.get('bias', -2.75)) - # pylint: disable=invalid-name - (_, self.D) = neural_container.constants.add_parameter( - 'D_' + self.n_id, kwargs.get('D', 1.0)) - - # Initialize states - self.m = neural_container.states.add_parameter( - 'm_' + self.n_id, kwargs.get('x0', 0.0))[0] - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.mdot = neural_container.dstates.add_parameter( - 'mdot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef LeakyIntegratorNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, kwargs.get('weight', 0.0)) - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, _weight) - - self.mdot.c_set_value(( - (-self.m.c_get_value() + _sum + self.ext_in.c_get_value())/self.tau) - ) - - cdef void c_output(self): - """ Neuron output. """ - self.nout.c_set_value(1. / (1. + exp(-self.D * ( - self.m.c_get_value() + self.bias)))) - - cdef double c_neuron_inputs_eval(self, double _neuron_out, double _weight): - """ Evaluate neuron inputs.""" - return _neuron_out*_weight diff --git a/farms_network/lif_danner.pxd b/farms_network/lif_danner.pxd deleted file mode 100644 index b5e446d..0000000 --- a/farms_network/lif_danner.pxd +++ /dev/null @@ -1,85 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
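# For reference, the leaky-integrator dynamics and output removed above in
# plain Python; tau, bias and D are the same parameters exposed by
# LeakyIntegratorParameterOptions earlier in this diff, and `drive` stands for
# the summed weighted inputs plus the external input.
import math

def leaky_integrator_rhs(m, drive, tau=0.1):
    """Return dm/dt for the removed LeakyIntegrator neuron."""
    return (-m + drive) / tau

def leaky_integrator_output(m, bias=-2.75, d=1.0):
    """Sigmoidal firing-rate output of the removed LeakyIntegrator neuron."""
    return 1.0 / (1.0 + math.exp(-d * (m + bias)))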
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire Neuron Based on Danner et.al. -""" - -from farms_container.parameter cimport Parameter -from libcpp.random cimport mt19937, normal_distribution - -from farms_network.neuron cimport Neuron -from farms_network.utils.ornstein_uhlenbeck cimport OrnsteinUhlenbeckParameters - -cdef struct DannerNeuronInput: - int neuron_idx - int weight_idx - -cdef class LIFDanner(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double c_m - double g_leak - double e_leak - double v_max - double v_thr - double g_syn_e - double g_syn_i - double e_syn_e - double e_syn_i - double m_e - double m_i - double b_e - double b_i - - double tau_noise - double mu_noise - double sigma_noise - double time_step_noise - unsigned long int seed_noise - - # states - Parameter v - - Parameter state_noise - - # inputs - Parameter alpha - - # ode - Parameter vdot - - # Ouputs - Parameter nout - - # neuron connenctions - DannerNeuronInput[:] neuron_inputs - - # current noise - OrnsteinUhlenbeckParameters noise_params - mt19937 random_mt19937 - normal_distribution[double] distribution - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - inline double c_neuron_inputs_eval(self, double _neuron_out, double _weight) diff --git a/farms_network/lif_danner.pyx b/farms_network/lif_danner.pyx deleted file mode 100644 index 201a81e..0000000 --- a/farms_network/lif_danner.pyx +++ /dev/null @@ -1,226 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire Neuron Based on Danner et.al. -""" - -import time -import numpy as np - -cimport numpy as cnp -from libc.math cimport cosh as ccosh -from libc.math cimport exp as cexp -from libc.math cimport fabs as cfabs -from libc.stdio cimport printf - -from farms_network.utils.ornstein_uhlenbeck cimport c_noise_current_update - - -cdef class LIFDanner(Neuron): - """Leaky Integrate and Fire Neuron Based on Danner et.al. 
- """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - super( - LIFDanner, self).__init__('lif_danner') - - self.n_id = n_id # Unique neuron identifier - # Constants - (_, self.c_m) = neural_container.constants.add_parameter( - 'c_m_' + self.n_id, kwargs.get('c_m', 10.0)) # pF - - (_, self.g_leak) = neural_container.constants.add_parameter( - 'g_leak_' + self.n_id, kwargs.get('g_leak', 2.8)) # : nS - (_, self.e_leak) = neural_container.constants.add_parameter( - 'e_leak_' + self.n_id, kwargs.get('e_leak', -60.0)) # : mV - - (_, self.tau_noise) = neural_container.constants.add_parameter( - 'tau_noise_' + self.n_id, kwargs.get('tau_noise', 10.0)) # ms - (_, self.mu_noise) = neural_container.constants.add_parameter( - 'mu_noise_' + self.n_id, kwargs.get('mu_noise', 0.0)) # - (_, self.sigma_noise) = neural_container.constants.add_parameter( - 'sigma_noise_' + self.n_id, kwargs.get('sigma_noise', 0.005)) # - (_, self.seed_noise) = neural_container.constants.add_parameter( - 'seed_noise_' + self.n_id, kwargs.get('seed_noise', time.thread_time_ns())) # - (_, self.time_step_noise) = neural_container.constants.add_parameter( - 'time_step_noise_' + self.n_id, kwargs.get('time_step_noise', 1e-3/2.0)) # - - self.state_noise = neural_container.parameters.add_parameter( - 'state_noise_' + self.n_id, kwargs.get('state_noise', 0.0))[0] # - - (_, self.v_max) = neural_container.constants.add_parameter( - 'v_max_' + self.n_id, kwargs.get('v_max', 0.0)) # : mV - (_, self.v_thr) = neural_container.constants.add_parameter( - 'v_thr_' + self.n_id, kwargs.get('v_thr', -50.0)) # : mV - - (_, self.g_syn_e) = neural_container.constants.add_parameter( - 'g_syn_e_' + self.n_id, kwargs.get('g_syn_e', 10.0)) # : nS - (_, self.g_syn_i) = neural_container.constants.add_parameter( - 'g_syn_i_' + self.n_id, kwargs.get('g_syn_i', 10.0)) # : nS - (_, self.e_syn_e) = neural_container.constants.add_parameter( - 'e_syn_e_' + self.n_id, kwargs.get('e_syn_e', -10.0)) # : mV - (_, self.e_syn_i) = neural_container.constants.add_parameter( - 'e_syn_i_' + self.n_id, kwargs.get('e_syn_i', -75.0)) # : mV - - (_, self.m_e) = neural_container.constants.add_parameter( - 'm_e_' + self.n_id, kwargs.pop('m_e', 0.0)) # m_E,i - (_, self.m_i) = neural_container.constants.add_parameter( - 'm_i_' + self.n_id, kwargs.pop('m_i', 0.0)) # m_I,i - (_, self.b_e) = neural_container.constants.add_parameter( - 'b_e_' + self.n_id, kwargs.pop('b_e', 0.0)) # m_E,i - (_, self.b_i) = neural_container.constants.add_parameter( - 'b_i_' + self.n_id, kwargs.pop('b_i', 0.0)) # m_I,i - - # State Variables - # pylint: disable=invalid-name - self.v = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('v0', -60.0))[0] # Membrane potential - - # ODE - self.vdot = neural_container.dstates.add_parameter( - 'vdot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # External Input (BrainStem Drive) - self.alpha = neural_container.inputs.add_parameter( - 'alpha_' + self.n_id, 0.22)[0] - - # Neuron inputs - self.num_inputs = num_inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i')]) - - # Initialize noisy current - self.random_mt19937 = mt19937(self.seed_noise) - self.distribution = normal_distribution[double](0.0, 1.0) - - self.noise_params = OrnsteinUhlenbeckParameters( - mu=self.mu_noise, - sigma=self.sigma_noise, - tau=self.tau_noise, - dt=self.time_step_noise, - random_generator=self.random_mt19937, - 
distribution=self.distribution - ) - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode. - Parameters - ---------- - """ - - # Create a struct to store the inputs and weights to the neuron - cdef DannerNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, kwargs.get('weight', 0.0))[0] - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # States - cdef double _v = self.v.c_get_value() - - # Drive inputs - cdef double d_e = self.m_e * self.alpha.c_get_value() + self.b_e - cdef double d_i = self.m_i * self.alpha.c_get_value() + self.b_i - - # Ileak - cdef double i_leak = self.g_leak * (_v - self.e_leak) - - # ISyn_Excitatory - cdef double i_syn_e = self.g_syn_e * d_e * (_v - self.e_syn_e) - - # ISyn_Inhibitory - cdef double i_syn_i = self.g_syn_i * d_i * (_v - self.e_syn_i) - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, _weight) - - # noise current - cdef double i_noise = c_noise_current_update( - self.state_noise.c_get_value(), &(self.noise_params) - ) - self.state_noise.c_set_value(i_noise) - - # dV - self.vdot.c_set_value( - -(i_leak + i_syn_e + i_syn_i + +i_noise + _sum)/self.c_m) - - cdef void c_output(self): - """ Neuron output. """ - cdef double _v = self.v.c_get_value() - cdef double _n_out - - if _v >= self.v_max: - _n_out = 1. 
- elif (self.v_thr <= _v) and (_v < self.v_max): - _n_out = (_v - self.v_thr) / (self.v_max - self.v_thr) - elif _v < self.v_thr: - _n_out = 0.0 - # Set the neuron output - self.nout.c_set_value(_n_out) - - cdef inline double c_neuron_inputs_eval(self, double _neuron_out, double _weight): - """ Evaluate neuron inputs.""" - cdef double _v = self.v.c_get_value() - - if _weight >= 0.0: - # Excitatory Synapse - return self.g_syn_e*cfabs(_weight)*_neuron_out*(_v - self.e_syn_e) - elif _weight < 0.0: - # Inhibitory Synapse - return self.g_syn_i*cfabs(_weight)*_neuron_out*(_v - self.e_syn_i) diff --git a/farms_network/lif_danner_nap.pxd b/farms_network/lif_danner_nap.pxd deleted file mode 100644 index b32c2d3..0000000 --- a/farms_network/lif_danner_nap.pxd +++ /dev/null @@ -1,98 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire Neuron Based on Danner et.al. -""" - -from farms_container.parameter cimport Parameter -from libcpp.random cimport mt19937, normal_distribution - -from farms_network.neuron cimport Neuron -from farms_network.utils.ornstein_uhlenbeck cimport OrnsteinUhlenbeckParameters - - -cdef struct DannerNapNeuronInput: - int neuron_idx - int weight_idx - -cdef class LIFDannerNap(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double c_m - double g_nap - double e_na - double v1_2_m - double k_m - double v1_2_h - double k_h - double v1_2_t - double k_t - double g_leak - double e_leak - double tau_0 - double tau_max - double v_max - double v_thr - double g_syn_e - double g_syn_i - double e_syn_e - double e_syn_i - double m_e - double m_i - double b_e - double b_i - - double tau_noise - double mu_noise - double sigma_noise - double time_step_noise - unsigned long int seed_noise - - # states - Parameter v - Parameter h - - Parameter state_noise - - # inputs - Parameter alpha - - # ode - Parameter vdot - Parameter hdot - - # Ouputs - Parameter nout - - # neuron connenctions - DannerNapNeuronInput[:] neuron_inputs - - # current noise - OrnsteinUhlenbeckParameters noise_params - mt19937 random_mt19937 - normal_distribution[double] distribution - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval(self, double _neuron_out, double _weight) diff --git a/farms_network/lif_danner_nap.pyx b/farms_network/lif_danner_nap.pyx deleted file mode 100644 index dcc1100..0000000 --- a/farms_network/lif_danner_nap.pyx +++ /dev/null @@ -1,277 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed 
under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire Neuron Based on Danner et.al. -""" - -import time -import numpy as np - -cimport numpy as cnp -from libc.math cimport cosh as ccosh -from libc.math cimport exp as cexp -from libc.math cimport fabs as cfabs -from libc.stdio cimport printf - -from farms_network.utils.ornstein_uhlenbeck cimport c_noise_current_update - - -cdef class LIFDannerNap(Neuron): - """Leaky Integrate and Fire Neuron Based on Danner et.al. - """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - super( - LIFDannerNap, self).__init__('lif_danner_nap') - - self.n_id = n_id # Unique neuron identifier - - # Constants - (_, self.c_m) = neural_container.constants.add_parameter( - 'c_m_' + self.n_id, kwargs.get('c_m', 10.0)) # pF - - (_, self.g_nap) = neural_container.constants.add_parameter( - 'g_nap_'+self.n_id, kwargs.get('g_nap', 4.5)) # nS - (_, self.e_na) = neural_container.constants.add_parameter( - 'e_na_'+self.n_id, kwargs.get('e_na', 50.0)) # mV - - (_, self.v1_2_m) = neural_container.constants.add_parameter( - 'v1_2_m_' + self.n_id, kwargs.get('v1_2_m', -40.0)) # mV - (_, self.k_m) = neural_container.constants.add_parameter( - 'k_m_' + self.n_id, kwargs.get('k_m', -6.0)) # mV - - (_, self.v1_2_h) = neural_container.constants.add_parameter( - 'v1_2_h_' + self.n_id, kwargs.get('v1_2_h', -45.0)) # mV - (_, self.k_h) = neural_container.constants.add_parameter( - 'k_h_' + self.n_id, kwargs.get('k_h', 4.0)) # mV - - (_, self.v1_2_t) = neural_container.constants.add_parameter( - 'v1_2_t_' + self.n_id, kwargs.get('v1_2_t', -35.0)) # mV - (_, self.k_t) = neural_container.constants.add_parameter( - 'k_t_' + self.n_id, kwargs.get('k_t', 15.0)) # mV - - (_, self.g_leak) = neural_container.constants.add_parameter( - 'g_leak_' + self.n_id, kwargs.get('g_leak', 4.5)) # nS - (_, self.e_leak) = neural_container.constants.add_parameter( - 'e_leak_' + self.n_id, kwargs.get('e_leak', -62.5)) # mV - - (_, self.tau_0) = neural_container.constants.add_parameter( - 'tau_0_' + self.n_id, kwargs.get('tau_0', 80.0)) # ms - (_, self.tau_max) = neural_container.constants.add_parameter( - 'tau_max_' + self.n_id, kwargs.get('tau_max', 160.0)) # ms - - (_, self.tau_noise) = neural_container.constants.add_parameter( - 'tau_noise_' + self.n_id, kwargs.get('tau_noise', 10.0)) # ms - (_, self.mu_noise) = neural_container.constants.add_parameter( - 'mu_noise_' + self.n_id, kwargs.get('mu_noise', 0.0)) # - (_, self.sigma_noise) = neural_container.constants.add_parameter( - 'sigma_noise_' + self.n_id, kwargs.get('sigma_noise', 0.005)) # - (_, self.seed_noise) = neural_container.constants.add_parameter( - 'seed_noise_' + self.n_id, kwargs.get('seed_noise', time.thread_time_ns())) # - (_, self.time_step_noise) = neural_container.constants.add_parameter( - 'time_step_noise_' + self.n_id, kwargs.get('time_step_noise', 1e-3/2.0)) # - - self.state_noise = neural_container.parameters.add_parameter( - 'state_noise_' + self.n_id, 
kwargs.get('state_noise', 0.0))[0] # - - (_, self.v_max) = neural_container.constants.add_parameter( - 'v_max_' + self.n_id, kwargs.get('v_max', 0.0)) # mV - (_, self.v_thr) = neural_container.constants.add_parameter( - 'v_thr_' + self.n_id, kwargs.get('v_thr', -50.0)) # mV - - (_, self.g_syn_e) = neural_container.constants.add_parameter( - 'g_syn_e_' + self.n_id, kwargs.get('g_syn_e', 10.0)) # nS - (_, self.g_syn_i) = neural_container.constants.add_parameter( - 'g_syn_i_' + self.n_id, kwargs.get('g_syn_i', 10.0)) # nS - (_, self.e_syn_e) = neural_container.constants.add_parameter( - 'e_syn_e_' + self.n_id, kwargs.get('e_syn_e', -10.0)) # mV - (_, self.e_syn_i) = neural_container.constants.add_parameter( - 'e_syn_i_' + self.n_id, kwargs.get('e_syn_i', -75.0)) # mV - - (_, self.m_e) = neural_container.constants.add_parameter( - 'm_e_' + self.n_id, kwargs.pop('m_e', 0.0)) # m_E,i - (_, self.m_i) = neural_container.constants.add_parameter( - 'm_i_' + self.n_id, kwargs.pop('m_i', 0.0)) # m_I,i - (_, self.b_e) = neural_container.constants.add_parameter( - 'b_e_' + self.n_id, kwargs.pop('b_e', 0.0)) # m_E,i - (_, self.b_i) = neural_container.constants.add_parameter( - 'b_i_' + self.n_id, kwargs.pop('b_i', 0.0)) # m_I,i - - # State Variables - # pylint: disable=invalid-name - self.v = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('v0', -60.0))[0] # Membrane potential - self.h = neural_container.states.add_parameter( - 'h_' + self.n_id, kwargs.get('h0', np.random.uniform(0, 1)))[0] - - # ODE - self.vdot = neural_container.dstates.add_parameter( - 'vdot_' + self.n_id, 0.0)[0] - self.hdot = neural_container.dstates.add_parameter( - 'hdot_' + self.n_id, 0.0)[0] - - # Ouput - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # External Input (BrainStem Drive) - self.alpha = neural_container.inputs.add_parameter( - 'alpha_' + self.n_id, 0.22)[0] - - # Neuron inputs - self.num_inputs = num_inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i')]) - - # Initialize noisy current - self.random_mt19937 = mt19937(self.seed_noise) - self.distribution = normal_distribution[double](0.0, 1.0) - - self.noise_params = OrnsteinUhlenbeckParameters( - mu=self.mu_noise, - sigma=self.sigma_noise, - tau=self.tau_noise, - dt=self.time_step_noise, - random_generator=self.random_mt19937, - distribution=self.distribution - ) - - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode. - Parameters - ---------- - """ - - # Create a struct to store the inputs and weights to the neuron - cdef DannerNapNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, kwargs.get('weight', 0.0))[0] - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. 
- Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # States - cdef double _v = self.v.c_get_value() - cdef double _h = self.h.c_get_value() - - # Drive inputs - cdef double d_e = self.m_e * self.alpha.c_get_value() + self.b_e - cdef double d_i = self.m_i * self.alpha.c_get_value() + self.b_i - - # tau_h(V) - cdef double tau_h = self.tau_0 + (self.tau_max - self.tau_0) / \ - ccosh((_v - self.v1_2_t) / self.k_t) - - # h_inf(V) - cdef double h_inf = 1./(1.0 + cexp((_v - self.v1_2_h) / self.k_h)) - - # m(V) - cdef double m = 1./(1.0 + cexp((_v - self.v1_2_m) / self.k_m)) - - # Inap - # pylint: disable=no-member - cdef double i_nap = self.g_nap * m * _h * (_v - self.e_na) - - # Ileak - cdef double i_leak = self.g_leak * (_v - self.e_leak) - - # ISyn_Excitatory - cdef double i_syn_e = self.g_syn_e * d_e * (_v - self.e_syn_e) - - # ISyn_Inhibitory - cdef double i_syn_i = self.g_syn_i * d_i * (_v - self.e_syn_i) - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, _weight) - - # Slow inactivation - self.hdot.c_set_value((h_inf - _h) / tau_h) - - # noise current - cdef double i_noise = c_noise_current_update( - self.state_noise.c_get_value(), &(self.noise_params) - ) - self.state_noise.c_set_value(i_noise) - - # dV - self.vdot.c_set_value( - -(i_nap + i_leak + i_syn_e + i_syn_i + i_noise + _sum)/self.c_m) - - cdef void c_output(self): - """ Neuron output. """ - cdef double _v = self.v.c_get_value() - cdef double _n_out - - if _v >= self.v_max: - _n_out = 1. - elif self.v_thr <= _v < self.v_max: - _n_out = (_v - self.v_thr) / (self.v_max - self.v_thr) - else: - _n_out = 0.0 - # Set the neuron output - self.nout.c_set_value(_n_out) - - cdef double c_neuron_inputs_eval(self, double _neuron_out, double _weight): - """ Evaluate neuron inputs.""" - cdef double _v = self.v.c_get_value() - - if _weight >= 0.0: - # Excitatory Synapse - return ( - self.g_syn_e*cfabs(_weight)*_neuron_out*(_v - self.e_syn_e)) - elif _weight < 0.0: - # Inhibitory Synapse - return ( - self.g_syn_i*cfabs(_weight)*_neuron_out*(_v - self.e_syn_i)) diff --git a/farms_network/lif_daun_interneuron.pxd b/farms_network/lif_daun_interneuron.pxd deleted file mode 100644 index 261cca0..0000000 --- a/farms_network/lif_daun_interneuron.pxd +++ /dev/null @@ -1,75 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire InterNeuron Based on Daun et.al. -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct DaunInterNeuronInput: - int neuron_idx - int g_syn_idx - int e_syn_idx - int gamma_s_idx - int v_h_s_idx - -cdef class LIFDaunInterneuron(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double c_m - double g_nap - double e_nap - double v_h_h - double gamma_h - double v_t_h - double eps - double gamma_t - double v_h_m - double gamma_m - double g_leak - double e_leak - - # states - Parameter v - Parameter h - - # inputs - Parameter g_app - Parameter e_app - - # ode - Parameter vdot - Parameter hdot - - # Ouputs - Parameter nout - - # neuron connenctions - DaunInterNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval(self, double _neuron_out, double _g_syn, double _e_syn, - double _gamma_s, double _v_h_s) diff --git a/farms_network/lif_daun_interneuron.pyx b/farms_network/lif_daun_interneuron.pyx deleted file mode 100644 index 7b27e5b..0000000 --- a/farms_network/lif_daun_interneuron.pyx +++ /dev/null @@ -1,231 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Leaky Integrate and Fire Interneuron. Daun et -""" -from libc.stdio cimport printf -import numpy as np -from libc.math cimport exp as cexp -from libc.math cimport cosh as ccosh -from libc.math cimport fabs as cfabs -cimport numpy as cnp - - -cdef class LIFDaunInterneuron(Neuron): - """Leaky Integrate and Fire Interneuron. - Based on Silvia Daun and Tbor's model. 
- """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - super(LIFDaunInterneuron, self).__init__('lif_daun_interneuron') - - self.n_id = n_id # Unique neuron identifier - # Constants - (_, self.g_nap) = neural_container.constants.add_parameter( - 'g_nap_' + self.n_id, kwargs.get('g_nap', 10.0)) - (_, self.e_nap) = neural_container.constants.add_parameter( - 'e_nap_' + self.n_id, kwargs.get('e_nap', 50.0)) - - # Parameters of h - (_, self.v_h_h) = neural_container.constants.add_parameter( - 'v_h_h_' + self.n_id, kwargs.get('v_h_h', -30.0)) - (_, self.gamma_h) = neural_container.constants.add_parameter( - 'gamma_h_' + self.n_id, kwargs.get('gamma_h', 0.1667)) - - # Parameters of tau - (_, self.v_t_h) = neural_container.constants.add_parameter( - 'v_t_h_' + self.n_id, kwargs.get('v_t_h', -30.0)) - (_, self.eps) = neural_container.constants.add_parameter( - 'eps_' + self.n_id, kwargs.get('eps', 0.0023)) - (_, self.gamma_t) = neural_container.constants.add_parameter( - 'gamma_t_' + self.n_id, kwargs.get('gamma_t', 0.0833)) - - # Parameters of m - (_, self.v_h_m) = neural_container.constants.add_parameter( - 'v_h_m_' + self.n_id, kwargs.get('v_h_m', -37.0)) - (_, self.gamma_m) = neural_container.constants.add_parameter( - 'gamma_m_' + self.n_id, kwargs.get('gamma_m', -0.1667)) - - # Parameters of Ileak - (_, self.g_leak) = neural_container.constants.add_parameter( - 'g_leak_' + self.n_id, kwargs.get('g_leak', 2.8)) - (_, self.e_leak) = neural_container.constants.add_parameter( - 'e_leak_' + self.n_id, kwargs.get('e_leak', -65.0)) - - # Other constants - (_, self.c_m) = neural_container.constants.add_parameter( - 'c_m_' + self.n_id, kwargs.get('c_m', 0.9154)) - - # State Variables - # pylint: disable=invalid-name - # Membrane potential - self.v = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('v0', -60.0))[0] - self.h = neural_container.states.add_parameter( - 'h_' + self.n_id, kwargs.get('h0', 0.0))[0] - - # ODE - self.vdot = neural_container.dstates.add_parameter( - 'vdot_' + self.n_id, 0.0)[0] - self.hdot = neural_container.dstates.add_parameter( - 'hdot_' + self.n_id, 0.0)[0] - - # External Input - self.g_app = neural_container.inputs.add_parameter( - 'g_app_' + self.n_id, kwargs.get('g_app', 0.2))[0] - self.e_app = neural_container.inputs.add_parameter( - 'e_app_' + self.n_id, kwargs.get('e_app', 0.0))[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.num_inputs = num_inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('g_syn_idx', 'i'), - ('e_syn_idx', 'i'), - ('gamma_s_idx', 'i'), - ('v_h_s_idx', 'i')]) - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode. - Parameters - ---------- - neuron : - Neuron model from which the input is received. 
- weight : - Strength of the synapse between the two neurons""" - - # Create a struct to store the inputs and weights to the neuron - cdef DaunInterNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - g_syn = neural_container.parameters.add_parameter( - 'g_syn_' + self.n_id, kwargs.pop('g_syn', 0.0))[0] - e_syn = neural_container.parameters.add_parameter( - 'e_syn_' + self.n_id, kwargs.pop('e_syn', 0.0))[0] - gamma_s = neural_container.parameters.add_parameter( - 'gamma_s_' + self.n_id, kwargs.pop('gamma_s', 0.0))[0] - v_h_s = neural_container.parameters.add_parameter( - 'v_h_s_' + self.n_id, kwargs.pop('v_h_s', 0.0))[0] - - # Get neuron parameter indices - g_syn_idx = neural_container.parameters.get_parameter_index( - 'g_syn_' + self.n_id) - e_syn_idx = neural_container.parameters.get_parameter_index( - 'e_syn_' + self.n_id) - gamma_s_idx = neural_container.parameters.get_parameter_index( - 'gamma_s_' + self.n_id) - v_h_s_idx = neural_container.parameters.get_parameter_index( - 'v_h_s_' + self.n_id) - - # Add the indices to the struct - n.neuron_idx = neuron_idx - n.g_syn_idx = g_syn_idx - n.e_syn_idx = e_syn_idx - n.gamma_s_idx = gamma_s_idx - n.v_h_s_idx = v_h_s_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # States - cdef double _v = self.v.c_get_value() - cdef double _h = self.h.c_get_value() - - # tau_h(V) - cdef double tau_h = 1./(self.eps*ccosh(self.gamma_t*(_v - self.v_t_h))) - - # h_inf(V) - cdef double h_inf = 1./(1. + cexp(self.gamma_h*(_v - self.v_h_h))) - - # m_inf(V) - cdef double m_inf = 1./(1. + cexp(self.gamma_m*(_v - self.v_h_m))) - - # Inap - # pylint: disable=no-member - cdef double i_nap = self.g_nap * m_inf * _h * (_v - self.e_nap) - - # Ileak - cdef double i_leak = self.g_leak * (_v - self.e_leak) - - # Iapp - cdef double i_app = self.g_app.c_get_value() * ( - _v - self.e_app.c_get_value()) - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _g_syn - cdef double _e_syn - cdef double _gamma_s - cdef double _v_h_s - cdef DaunInterNeuronInput _neuron - - for j in range(self.num_inputs): - _neuron = self.neuron_inputs[j] - _neuron_out = _y[_neuron.neuron_idx] - _g_syn = _p[_neuron.g_syn_idx] - _e_syn = _p[_neuron.e_syn_idx] - _gamma_s = _p[_neuron.gamma_s_idx] - _v_h_s = _p[_neuron.v_h_s_idx] - _sum += self.c_neuron_inputs_eval( - _neuron_out, _g_syn, _e_syn, _gamma_s, _v_h_s) - - # Slow inactivation - self.hdot.c_set_value((h_inf - _h)/tau_h) - - # dV - self.vdot.c_set_value((-i_nap - i_leak - i_app - _sum)/self.c_m) - - cdef void c_output(self): - """ Neuron output. """ - # Set the neuron output - self.nout.c_set_value(self.v.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _g_syn, double _e_syn, - double _gamma_s, double _v_h_s): - """ Evaluate neuron inputs.""" - cdef double _v = self.v.c_get_value() - - cdef double _s_inf = 1./(1. 
+ cexp(_gamma_s*(_neuron_out - _v_h_s))) - - return _g_syn*_s_inf*(_v - _e_syn) diff --git a/farms_network/matsuoka_neuron.pxd b/farms_network/matsuoka_neuron.pxd deleted file mode 100644 index af59c22..0000000 --- a/farms_network/matsuoka_neuron.pxd +++ /dev/null @@ -1,67 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Matsuoka Neuron model. -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct MatsuokaNeuronInput: - int neuron_idx - int weight_idx - int phi_idx - -cdef class MatsuokaNeuron(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double c - double b - double tau - double T - double theta - double nu - - # states - Parameter V - Parameter w - - # inputs - Parameter ext_in - - # ode - Parameter V_dot - Parameter w_dot - - # Ouputs - Parameter nout - - # neuron connenctions - MatsuokaNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w) diff --git a/farms_network/matsuoka_neuron.pyx b/farms_network/matsuoka_neuron.pyx deleted file mode 100644 index cb9e78d..0000000 --- a/farms_network/matsuoka_neuron.pyx +++ /dev/null @@ -1,168 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ -Matsuoka Neuron model -""" -from libc.stdio cimport printf -import farms_pylog as pylog -import numpy as np -cimport numpy as cnp - - -cdef class MatsuokaNeuron(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super(MatsuokaNeuron, self).__init__('matsuoka_neuron') - - # Neuron ID - self.n_id = n_id - - # Initialize parameters - - (_, self.c) = neural_container.constants.add_parameter( - 'c_' + self.n_id, kwargs.get('c', 1)) - - (_, self.b) = neural_container.constants.add_parameter( - 'b_' + self.n_id, kwargs.get('b', 1)) - - (_, self.tau) = neural_container.constants.add_parameter( - 'tau_' + self.n_id, kwargs.get('tau', 1)) - - (_, self.T) = neural_container.constants.add_parameter( - 'T_' + self.n_id, kwargs.get('T', 12)) - - (_, self.theta) = neural_container.constants.add_parameter( - 'theta_' + self.n_id, kwargs.get('theta', 0.0)) - - (_, self.nu) = neural_container.constants.add_parameter( - 'nu' + self.n_id, kwargs.get('nu', 0.5)) - - # Initialize states - self.V = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('V0', 0.0))[0] - self.w = neural_container.states.add_parameter( - 'w_' + self.n_id, kwargs.get('w0', 0.5))[0] - - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.V_dot = neural_container.dstates.add_parameter( - 'V_dot_' + self.n_id, 0.0)[0] - self.w_dot = neural_container.dstates.add_parameter( - 'w_dot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i'), - ('phi_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef MatsuokaNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('weight', 2.5))[0] - phi = neural_container.parameters.add_parameter( - 'phi_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('phi', 0.0))[0] - - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - phi_idx = neural_container.parameters.get_parameter_index( - 'phi_' + neuron.n_id + '_to_' + self.n_id) - - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - n.phi_idx = phi_idx - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. 
Internal Setup Function.""" - - # Current state - cdef double _V = self.V.c_get_value() - cdef double _W = self.w.c_get_value() - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - cdef double _phi - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _phi = _p[self.neuron_inputs[j].phi_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, - _weight, _phi, _V, _W) - - # phidot : V_dot - self.V_dot.c_set_value((1/self.tau)*(self.c - _V - _sum - self.b*_W)) - - # wdot - self.w_dot.c_set_value((1/self.T)*(-_W + self.nu*_V)) - - cdef void c_output(self): - """ Neuron output. """ - _V = self.V.c_get_value() - if _V < 0: - self.nout.c_set_value(max(-1, _V)) - else: - self.nout.c_set_value(min(1, _V)) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w): - """ Evaluate neuron inputs.""" - return _weight*_neuron_out diff --git a/farms_network/models/__init__.py b/farms_network/models/__init__.py new file mode 100644 index 0000000..d3ecbbf --- /dev/null +++ b/farms_network/models/__init__.py @@ -0,0 +1,44 @@ +from abc import ABC +from enum import Enum, unique +from typing import Type, Union + + +class BaseTypes(Enum): + """ Base class for enum types""" + + @classmethod + def to_str(cls, value: Union[str, Type]) -> Type: + if isinstance(value, cls): + return value.value + if value in cls._value2member_map_: + return value + valid_types = ", ".join(type.value for type in cls) + raise ValueError(f"Invalid type '{value}'. Must be one of: {valid_types}") + + +@unique +class Models(str, BaseTypes): + BASE = "base" + RELAY = "relay" + LINEAR = "linear" + RELU = "relu" + OSCILLATOR = "oscillator" + HOPF_OSCILLATOR = "hopf_oscillator" + MORPHED_OSCILLATOR = "morphed_oscillator" + MATSUOKA = "matsuoka" + FITZHUGH_NAGUMO = "fitzhugh_nagumo" + MORRIS_LECAR = "morris_lecar" + LEAKY_INTEGRATOR = "leaky_integrator" + LI_DANNER = "li_danner" + LI_NAP_DANNER = "li_nap_danner" + LI_DAUN = "li_daun" + HH_DAUN = "hh_daun" + + +@unique +class EdgeTypes(str, BaseTypes): + GENERIC = "generic" + EXCITATORY = "excitatory" + INHIBITORY = "inhibitory" + CHOLINERGIC = "cholinergic" + PHASE_COUPLING = "phase_coupling" diff --git a/farms_network/models/factory.py b/farms_network/models/factory.py new file mode 100644 index 0000000..a2b49bb --- /dev/null +++ b/farms_network/models/factory.py @@ -0,0 +1,130 @@ +""" Factory class for generating the node and edges. 
""" + +from abc import ABC +from typing import Dict, Type, Union + +from farms_network.core.node import Node +from farms_network.core.edge import Edge +from farms_network.models import Models +# from farms_network.models.fitzhugh_nagumo import FitzhughNagumo +# from farms_network.models.hh_daun_motoneuron import HHDaunMotoneuron +from farms_network.models.hopf_oscillator import HopfOscillatorNode +# from farms_network.models.leaky_integrator import LeakyIntegratorNode +from farms_network.models.li_danner import LIDannerNode +from farms_network.models.li_nap_danner import LINaPDannerNode +from farms_network.models.linear import LinearNode +# from farms_network.models.lif_daun_interneuron import LIFDaunInterneuron +# from farms_network.models.matsuoka_node import MatsuokaNode +# from farms_network.models.morphed_oscillator import MorphedOscillator +# from farms_network.models.morris_lecar import MorrisLecarNode +from farms_network.models.oscillator import OscillatorNode +from farms_network.models.oscillator import OscillatorEdge +from farms_network.models.relay import RelayNode +from farms_network.models.relu import ReLUNode + + +class BaseFactory(ABC): + """ Base Factory implementation """ + + _registry: Dict = {} + + @classmethod + def available_types(cls) -> list[str]: + """Get list of registered node types. + + Returns: + Sorted list of registered node type identifiers + """ + return list(cls._registry.keys()) + + @classmethod + def create(cls, item_type: Union[str, Models]) -> Node: + """Create a item instance of the specified type. + + Args: + item_type: Type identifier of item to create + + Returns: + Instance of requested item class + + Raises: + KeyError: If item_type is not registered + """ + try: + item_class = cls._registry[item_type] + return item_class + except KeyError: + available = ', '.join(cls._registry.keys()) + raise KeyError( + f"Unknown item type: {item_type}. " + f"Available types: {available}" + ) + + @classmethod + def register(cls, item_type, item_class) -> None: + """Register a new item type. + + Args: + item_type: Unique identifier for the item + item_class: Node class to register, must inherit from Node + + Raises: + TypeError: If item_class doesn't inherit from Node + ValueError: If item_type is already registered + """ + if not issubclass(item_class, cls.get_base_type()): + raise TypeError( + f"Class must inherit from {cls.get_base_type()}: {item_class}" + ) + if item_type in cls._registry: + raise ValueError(f"Type already registered: {item_type}") + cls._registry[item_type] = item_class + + @classmethod + def get_base_type(cls): + """Get the base type for factory products. + + Must be implemented by subclasses. + + Returns: + Base type that all products must inherit from + """ + raise NotImplementedError + + +class NodeFactory(BaseFactory): + """Implementation of Factory Node class. 
+ """ + _registry: Dict[Models, Type[Node]] = { + Models.BASE: Node, + Models.RELAY: RelayNode, + Models.LINEAR: LinearNode, + Models.RELU: ReLUNode, + Models.OSCILLATOR: OscillatorNode, + Models.HOPF_OSCILLATOR: HopfOscillatorNode, + # Models.MORPHED_OSCILLATOR: MorphedOscillatorNode, + # Models.MATSUOKA: MatsuokaNode, + # Models.FITZHUGH_NAGUMO: FitzhughNagumoNode, + # Models.MORRIS_LECAR: MorrisLecarNode, + # Models.LEAKY_INTEGRATOR: LeakyIntegratorNode, + Models.LI_DANNER: LIDannerNode, + Models.LI_NAP_DANNER: LINaPDannerNode, + # Models.LI_DAUN: LIDaunNode, + # Models.HH_DAUN: HHDaunNode, + } + + @classmethod + def get_base_type(cls) -> Type[Node]: + return Node + + +class EdgeFactory(BaseFactory): + """Implementation of Factory Edge class.""" + _registry: Dict[Models, Type[Edge]] = { + Models.BASE: Edge, + Models.OSCILLATOR: OscillatorEdge, + } + + @classmethod + def get_base_type(cls) -> Type[Edge]: + return Edge diff --git a/farms_network/models/fitzhugh_nagumo_cy.pxd b/farms_network/models/fitzhugh_nagumo_cy.pxd new file mode 100644 index 0000000..bb32ad7 --- /dev/null +++ b/farms_network/models/fitzhugh_nagumo_cy.pxd @@ -0,0 +1,21 @@ +""" +---------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----------------------------------------------------------------------- + +Fitzhugh Nagumo model. + +""" diff --git a/farms_network/models/fitzhugh_nagumo_cy.pyx b/farms_network/models/fitzhugh_nagumo_cy.pyx new file mode 100644 index 0000000..bb32ad7 --- /dev/null +++ b/farms_network/models/fitzhugh_nagumo_cy.pyx @@ -0,0 +1,21 @@ +""" +---------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----------------------------------------------------------------------- + +Fitzhugh Nagumo model. 
+ +""" diff --git a/farms_network/models/hh_daun_motoneuron_cy.pxd b/farms_network/models/hh_daun_motoneuron_cy.pxd new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/models/hh_daun_motoneuron_cy.pyx b/farms_network/models/hh_daun_motoneuron_cy.pyx new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/models/hopf_oscillator.py b/farms_network/models/hopf_oscillator.py new file mode 100644 index 0000000..7927dd3 --- /dev/null +++ b/farms_network/models/hopf_oscillator.py @@ -0,0 +1,14 @@ +from farms_network.core.node import Node +from farms_network.models import Models +from farms_network.core.options import HopfOscillatorNodeOptions +from farms_network.models.hopf_oscillator_cy import HopfOscillatorNodeCy + + +class HopfOscillatorNode(Node): + + CY_NODE_CLASS = HopfOscillatorNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.OSCILLATOR, **kwargs) + + # Hopf Oscillator-specific properties diff --git a/farms_network/models/hopf_oscillator_cy.pxd b/farms_network/models/hopf_oscillator_cy.pxd new file mode 100644 index 0000000..333412e --- /dev/null +++ b/farms_network/models/hopf_oscillator_cy.pxd @@ -0,0 +1,56 @@ +""" Hopf-Oscillator model """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t + + +cdef enum: + #STATES + NSTATES = 2 + STATE_X = 0 + STATE_Y= 1 + + +cdef packed struct hopf_oscillator_params_t: + + double mu + double omega + double alpha + double beta + + +cdef void hopf_oscillator_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void hopf_oscillator_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef double hopf_oscillator_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef class HopfOscillatorNodeCy(NodeCy): + """ Python interface to HopfOscillator Node C-Structure """ + + cdef: + hopf_oscillator_params_t params diff --git a/farms_network/models/hopf_oscillator_cy.pyx b/farms_network/models/hopf_oscillator_cy.pyx new file mode 100644 index 0000000..7397234 --- /dev/null +++ b/farms_network/models/hopf_oscillator_cy.pyx @@ -0,0 +1,113 @@ +""" Hopf Oscillator + +[1]L. Righetti and A. J. Ijspeert, “Pattern generators with sensory +feedback for the control of quadruped locomotion,” in 2008 IEEE +International Conference on Robotics and Automation, May 2008, +pp. 819–824. doi: 10.1109/ROBOT.2008.4543306. 
+""" + +from libc.stdio cimport printf + + +cpdef enum STATE: + + #STATES + nstates = NSTATES + x = STATE_X + y = STATE_Y + + +cdef void hopf_oscillator_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + + # Parameters + cdef hopf_oscillator_params_t* params = ( node[0].params) + + # States + cdef double state_x = states[STATE.x] + cdef double state_y = states[STATE.y] + + for j in range(inputs.ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + out.generic += (_weight*_input) + + +cdef void hopf_oscillator_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + # Parameters + cdef hopf_oscillator_params_t* params = ( node[0].params) + + # States + cdef double state_x = states[STATE.x] + cdef double state_y = states[STATE.y] + + cdef double input_val = input_vals.generic + + r_square = (state_x**2 + state_y**2) + # xdot : x_dot + derivatives[STATE.x] = ( + params.alpha*(params.mu - r_square)*state_x - params.omega*state_y + ) + # ydot : y_dot + derivatives[STATE.y] = ( + params.beta*(params.mu - r_square)*state_y + params.omega*state_x + (input_val) + ) + + +cdef double hopf_oscillator_output_tf( + double time, + const double* states, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept: + return states[STATE.y] + + +cdef class HopfOscillatorNodeCy(NodeCy): + """ Python interface to HopfOscillator Node C-Structure """ + + def __cinit__(self): + # override default ode and out methods + self._node.nstates = 2 + self._node.nparams = 4 + + self._node.is_statefull = True + self._node.input_tf = hopf_oscillator_input_tf + self._node.ode = hopf_oscillator_ode + self._node.output_tf = hopf_oscillator_output_tf + # parameters + self.params = hopf_oscillator_params_t() + self._node.params = &self.params + if self._node.params is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, **kwargs): + super().__init__() + + # Set node parameters + self.params.mu = kwargs.pop("mu") + self.params.omega = kwargs.pop("omega") + self.params.alpha = kwargs.pop("alpha") + self.params.beta = kwargs.pop("beta") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef hopf_oscillator_params_t params = ( self._edge.params)[0] + return params diff --git a/farms_network/models/izhikevich.pxd b/farms_network/models/izhikevich.pxd new file mode 100644 index 0000000..fb94545 --- /dev/null +++ b/farms_network/models/izhikevich.pxd @@ -0,0 +1,70 @@ +""" +----------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+----------------------------------------------------------------------- + +Izhikevich neuron model based on Izhikevich et.al. 2003 +""" + +from ..core.node cimport NodeCy, Node +from ..core.edge cimport EdgeCy, Edge + + +cdef enum: + #STATES + NSTATES = 2 + STATE_V = 0 + STATE_U = 1 + + + +cdef packed struct IzhikevichNodeParameters: + double a # recovery time scale + double b # recovery sensitivity + double c # after-spike reset + double d # after-spike recovery reset + + +cdef: + void ode( + double time, + double* states, + double* derivatives, + double external_input, + double* network_outputs, + unsigned int* inputs, + double* weights, + double noise, + NodeCy* c_node, + EdgeCy** c_edges, + ) noexcept + double output( + double time, + double* states, + double external_input, + double* network_outputs, + unsigned int* inputs, + double* weights, + NodeCy* c_node, + EdgeCy** c_edges, + ) noexcept + + +cdef class IzhikevichNode(Node): + """ Python interface to Izhikevich Node C-Structure """ + + cdef: + IzhikevichNodeParameters parameters diff --git a/farms_network/models/izhikevich_cy.pxd b/farms_network/models/izhikevich_cy.pxd new file mode 100644 index 0000000..09f7735 --- /dev/null +++ b/farms_network/models/izhikevich_cy.pxd @@ -0,0 +1,119 @@ +""" +----------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+----------------------------------------------------------------------- + +Izhikevich model +""" + +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc +from libc.string cimport strdup + + +cpdef enum STATE: + + #STATES + nstates = NSTATES + v = STATE_V + u = STATE_U + + +cdef void ode( + double time, + double* states, + double* derivatives, + double external_input, + double* network_outputs, + unsigned int* inputs, + double* weights, + double noise, + NodeCy* c_node, + EdgeCy** c_edges, +) noexcept: + """ Node ODE """ + # Parameters + cdef IzhikevichNodeParameters params = ( + c_node[0].parameters + )[0] + + # States + cdef double state_v = states[STATE.v] + cdef double state_u = states[STATE.u] + + # Node inputs + # cdef: + # double _sum = 0.0 + # unsigned int j + # double _node_out, res, _input, _weight + + # cdef unsigned int ninputs = c_node.ninputs + # for j in range(ninputs): + # _input = network_outputs[inputs[j]] + # _weight = weights[j] + # if _weight >= 0.0: + # # Excitatory Synapse + # _sum += params.g_syn_e*cfabs(_weight)*_input*(state_v - params.e_syn_e) + # elif _weight < 0.0: + # # Inhibitory Synapse + # _sum += params.g_syn_i*cfabs(_weight)*_input*(state_v - params.e_syn_i) + + # # dV + # derivatives[STATE.v] = 0.04*state_v**2 + 5.0*state_v + 140.0 - state_u + _sum + # # dU + # derivatives[STATE.u] = params.a*(params.b*state_v - state_u) + + +cdef double output( + double time, + double* states, + double external_input, + double* network_outputs, + unsigned int* inputs, + double* weights, + NodeCy* c_node, + EdgeCy** c_edges, +) noexcept: + """ Node output. """ + ... + + +cdef class IzhikevichNode(Node): + """ Python interface to Izhikevich Node C-Structure """ + + def __cinit__(self): + self.c_node.model_type = strdup("IZHIKEVICH".encode('UTF-8')) + # override default ode and out methods + self.c_node.is_statefull = True + self.c_node.output = output + # parameters + self.c_node.parameters = malloc(sizeof(IzhikevichNodeParameters)) + if self.c_node.parameters is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, name: str, **kwargs): + super().__init__(name) + + # Set node parameters + cdef IzhikevichNodeParameters* param = (self.c_node.parameters) + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef IzhikevichNodeParameters params = ( self.c_node.parameters)[0] + return params diff --git a/farms_network/neuron.pxd b/farms_network/models/leaky_integrator_cy.pxd similarity index 78% rename from farms_network/neuron.pxd rename to farms_network/models/leaky_integrator_cy.pxd index 6462daa..bd4aff4 100644 --- a/farms_network/neuron.pxd +++ b/farms_network/models/leaky_integrator_cy.pxd @@ -16,16 +16,5 @@ See the License for the specific language governing permissions and limitations under the License. ----------------------------------------------------------------------- -Header for Neuron Base Class. +Leaky Integrator Neuron. """ - -cdef class Neuron: - """Base neuron class. 
- """ - - cdef: - str _model_type - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) diff --git a/farms_network/sensory_neuron.pxd b/farms_network/models/leaky_integrator_cy.pyx similarity index 67% rename from farms_network/sensory_neuron.pxd rename to farms_network/models/leaky_integrator_cy.pyx index b68af13..bd4aff4 100644 --- a/farms_network/sensory_neuron.pxd +++ b/farms_network/models/leaky_integrator_cy.pyx @@ -16,22 +16,5 @@ See the License for the specific language governing permissions and limitations under the License. ----------------------------------------------------------------------- -Sensory afferent neurons. +Leaky Integrator Neuron. """ - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef class SensoryNeuron(Neuron): - cdef: - readonly str n_id - - # Input from external system - Parameter aff_inp - - # Ouputs - Parameter nout - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) diff --git a/farms_network/models/li_danner.py b/farms_network/models/li_danner.py new file mode 100644 index 0000000..0235cfc --- /dev/null +++ b/farms_network/models/li_danner.py @@ -0,0 +1,12 @@ +from farms_network.core.node import Node +from farms_network.models import Models +from farms_network.core.options import LIDannerNodeOptions +from farms_network.models.li_danner_cy import LIDannerNodeCy + + +class LIDannerNode(Node): + + CY_NODE_CLASS = LIDannerNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.LI_DANNER, **kwargs) diff --git a/farms_network/models/li_danner_cy.pxd b/farms_network/models/li_danner_cy.pxd new file mode 100644 index 0000000..3ad22d6 --- /dev/null +++ b/farms_network/models/li_danner_cy.pxd @@ -0,0 +1,62 @@ +""" Leaky Integrator Node Based on Danner et.al. 2016 """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t, EXCITATORY, INHIBITORY, CHOLINERGIC + + +cdef enum: + #STATES + NSTATES = 2 + STATE_V = 0 + STATE_A = 1 + + +cdef packed struct li_danner_params_t: + + double c_m # pF + double g_leak # nS + double e_leak # mV + double v_max # mV + double v_thr # mV + double g_syn_e # nS + double g_syn_i # nS + double e_syn_e # mV + double e_syn_i # mV + double tau_ch # ms + + +cdef void li_danner_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void li_danner_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef double li_danner_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef class LIDannerNodeCy(NodeCy): + """ Python interface to LI Danner Node C-Structure """ + + cdef: + li_danner_params_t params diff --git a/farms_network/models/li_danner_cy.pyx b/farms_network/models/li_danner_cy.pyx new file mode 100644 index 0000000..cdb4e2c --- /dev/null +++ b/farms_network/models/li_danner_cy.pyx @@ -0,0 +1,150 @@ +""" Leaky Integrator Node based on Danner et.al. 
""" + +from libc.math cimport fabs as cfabs +from libc.stdio cimport printf +from libc.string cimport strdup + +from farms_network.models import Models + + +cpdef enum STATE: + #STATES + nstates = NSTATES + v = STATE_V + a = STATE_A + + +cdef void li_danner_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + # Parameters + cdef li_danner_params_t* params = ( node[0].params) + + # States + cdef double state_v = states[STATE.v] + cdef double state_a = states[STATE.a] + + # Node inputs + cdef: + double _sum = 0.0 + double _cholinergic_sum = 0.0 + unsigned int j + double _node_out, res, _input, _weight + const edge_t* _edge + + cdef unsigned int ninputs = inputs.ninputs + for j in range(ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + _edge = edges[inputs.edge_indices[j]] + if _edge.type == EXCITATORY: + # Excitatory Synapse + out.excitatory += params.g_syn_e*cfabs(_weight)*_input*(state_v - params.e_syn_e) + elif _edge.type == INHIBITORY: + # Inhibitory Synapse + out.inhibitory += params.g_syn_i*cfabs(_weight)*_input*(state_v - params.e_syn_i) + elif _edge.type == CHOLINERGIC: + out.cholinergic += cfabs(_weight)*_input + + +cdef void li_danner_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + cdef li_danner_params_t* params = ( node[0].params) + + # States + cdef double state_v = states[STATE.v] + cdef double state_a = states[STATE.a] + + # Ileak + cdef double i_leak = params.g_leak * (state_v - params.e_leak) + + # noise current + cdef double i_noise = noise + + # da + derivatives[STATE.a] = (-state_a + input_vals.cholinergic)/params.tau_ch + + # dV + derivatives[STATE.v] = -( + i_leak + i_noise + input_vals.excitatory + input_vals.inhibitory + )/params.c_m + + +cdef double li_danner_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + cdef li_danner_params_t* params = ( node.params) + + cdef double _n_out = 0.0 + cdef double cholinergic_gain = 1.0 + cdef double state_v = states[STATE.v] + cdef double state_a = states[STATE.a] + + if state_v >= params.v_max: + _n_out = 1.0 + elif (params.v_thr <= state_v) and (state_v < params.v_max): + _n_out = (state_v - params.v_thr) / (params.v_max - params.v_thr) + elif state_v < params.v_thr: + _n_out = 0.0 + if state_a > 0.0: + cholinergic_gain = (1.0 + state_a) + _n_out = min(cholinergic_gain*_n_out, 1.0) + return _n_out + + +cdef class LIDannerNodeCy(NodeCy): + """ Python interface to Leaky Integrator Node C-Structure """ + + def __cinit__(self): + self._node.nstates = 2 + self._node.nparams = 10 + + self._node.is_statefull = True + + self._node.input_tf = li_danner_input_tf + self._node.ode = li_danner_ode + self._node.output_tf = li_danner_output_tf + # parameters + self.params = li_danner_params_t() + self._node.params = &self.params + if self._node.params is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, **kwargs): + super().__init__() + + # Set node parameters + self.params.c_m = kwargs.pop("c_m") + self.params.g_leak = kwargs.pop("g_leak") + self.params.e_leak = kwargs.pop("e_leak") + self.params.v_max = kwargs.pop("v_max") + self.params.v_thr = kwargs.pop("v_thr") + self.params.g_syn_e = kwargs.pop("g_syn_e") + self.params.g_syn_i = 
kwargs.pop("g_syn_i") + self.params.e_syn_e = kwargs.pop("e_syn_e") + self.params.e_syn_i = kwargs.pop("e_syn_i") + self.params.tau_ch = kwargs.pop("tau_ch") + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef li_danner_params_t params = ( self._node.params)[0] + return params diff --git a/farms_network/models/li_daun_interneuron_cy.pxd b/farms_network/models/li_daun_interneuron_cy.pxd new file mode 100644 index 0000000..3b93a21 --- /dev/null +++ b/farms_network/models/li_daun_interneuron_cy.pxd @@ -0,0 +1,28 @@ +""" Leaky Integrate and Fire InterNeuron Based on Daun et.al. """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t, EXCITATORY, INHIBITORY, CHOLINERGIC + + +cdef enum: + #STATES + NSTATES = 2 + STATE_V = 0 + STATE_H = 1 + + +cdef packed struct li_daun_params_t: + + double c_m + double g_nap + double e_nap + double v_h_h + double gamma_h + double v_t_h + double eps + double gamma_t + double v_h_m + double gamma_m + double g_leak + double e_leak diff --git a/farms_network/models/li_daun_interneuron_cy.pyx b/farms_network/models/li_daun_interneuron_cy.pyx new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/models/li_nap_danner.py b/farms_network/models/li_nap_danner.py new file mode 100644 index 0000000..1aa7547 --- /dev/null +++ b/farms_network/models/li_nap_danner.py @@ -0,0 +1,12 @@ +from farms_network.core.node import Node +from farms_network.models import Models +from farms_network.core.options import LINaPDannerNodeOptions +from farms_network.models.li_nap_danner_cy import LINaPDannerNodeCy + + +class LINaPDannerNode(Node): + + CY_NODE_CLASS = LINaPDannerNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.LI_NAP_DANNER, **kwargs) diff --git a/farms_network/models/li_nap_danner_cy.pxd b/farms_network/models/li_nap_danner_cy.pxd new file mode 100644 index 0000000..f8efe56 --- /dev/null +++ b/farms_network/models/li_nap_danner_cy.pxd @@ -0,0 +1,73 @@ +""" +Leaky Integrator Node Based on Danner et.al. 
with Na and K channels +""" + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t, EXCITATORY, INHIBITORY, CHOLINERGIC + + +cdef enum: + + #STATES + NSTATES = 2 + STATE_V = 0 + STATE_H = 1 + + +cdef packed struct li_nap_danner_params_t: + + double c_m # pF + double g_leak # nS + double e_leak # mV + double g_nap # nS + double e_na # mV + double v1_2_m # mV + double k_m # + double v1_2_h # mV + double k_h # + double v1_2_t # mV + double k_t # + double tau_0 # mS + double tau_max # mS + double v_max # mV + double v_thr # mV + double g_syn_e # nS + double g_syn_i # nS + double e_syn_e # mV + double e_syn_i # mV + + +cdef void li_nap_danner_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void li_nap_danner_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef double li_nap_danner_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef class LINaPDannerNodeCy(NodeCy): + """ Python interface to LI Danner NaP Node C-Structure """ + + cdef: + li_nap_danner_params_t params diff --git a/farms_network/models/li_nap_danner_cy.pyx b/farms_network/models/li_nap_danner_cy.pyx new file mode 100644 index 0000000..9dbfd56 --- /dev/null +++ b/farms_network/models/li_nap_danner_cy.pyx @@ -0,0 +1,166 @@ +from libc.math cimport cosh as ccosh +from libc.math cimport exp as cexp +from libc.math cimport fabs as cfabs +from libc.stdio cimport printf +from libc.string cimport strdup +import numpy as np + +from farms_network.models import Models + + +cpdef enum STATE: + #STATES + nstates = NSTATES + v = STATE_V + h = STATE_H + + +cdef void li_nap_danner_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + + cdef li_nap_danner_params_t* params = ( node[0].params) + + # States + cdef double state_v = states[STATE.v] + cdef double state_h = states[STATE.h] + + # Neuron inputs + cdef: + double _sum = 0.0 + unsigned int j + double _input, _weight + edge_t* _edge + + cdef unsigned int ninputs = inputs.ninputs + for j in range(ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + _edge = edges[inputs.edge_indices[j]] + if _edge.type == EXCITATORY: + # Excitatory Synapse + out.excitatory += params.g_syn_e*cfabs(_weight)*_input*(state_v - params.e_syn_e) + elif _edge.type == INHIBITORY: + # print(_input, _weight, inputs.source_indices[j], edges[j].type) + # Inhibitory Synapse + out.inhibitory += params.g_syn_i*cfabs(_weight)*_input*(state_v - params.e_syn_i) + + +cdef void li_nap_danner_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + cdef li_nap_danner_params_t* params = ( node[0].params) + + # States + cdef double state_v = states[STATE.v] + cdef double state_h = states[STATE.h] + + # tau_h(V) + cdef double tau_h = params.tau_0 + (params.tau_max - params.tau_0) / \ + ccosh((state_v - params.v1_2_t) / params.k_t) + + # h_inf(V) + cdef double h_inf = 1./(1.0 + cexp((state_v - params.v1_2_h) / params.k_h)) + + # m(V) + cdef double m = 1./(1.0 + cexp((state_v - params.v1_2_m) / params.k_m)) + + # 
Inap + # pylint: disable=no-member + cdef double i_nap = params.g_nap * m * state_h * (state_v - params.e_na) + + # Ileak + cdef double i_leak = params.g_leak * (state_v - params.e_leak) + + # noise current + cdef double i_noise = noise + + # Slow inactivation + derivatives[STATE.h] = (h_inf - state_h) / tau_h + + # dV + derivatives[STATE.v] = -( + i_nap + i_leak + i_noise + input_vals.excitatory + input_vals.inhibitory + )/params.c_m + + +cdef double li_nap_danner_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + cdef li_nap_danner_params_t* params = ( node[0].params) + + cdef double _n_out = 0.0 + cdef double state_v = states[STATE.v] + if state_v >= params.v_max: + _n_out = 1.0 + elif (params.v_thr <= state_v) and (state_v < params.v_max): + _n_out = (state_v - params.v_thr) / (params.v_max - params.v_thr) + elif state_v < params.v_thr: + _n_out = 0.0 + return _n_out + + +cdef class LINaPDannerNodeCy(NodeCy): + """ Python interface to LI Danner NaP Node C-Structure """ + + def __cinit__(self): + self._node.nstates = 2 + self._node.nparams = 19 + + self._node.is_statefull = True + + self._node.input_tf = li_nap_danner_input_tf + self._node.ode = li_nap_danner_ode + self._node.output_tf = li_nap_danner_output_tf + # parameters + self.params = li_nap_danner_params_t() + self._node.params = &self.params + if self._node.params is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, **kwargs): + super().__init__() + + # Set node parameters + self.params.c_m = kwargs.pop("c_m") + self.params.g_nap = kwargs.pop("g_nap") + self.params.e_na = kwargs.pop("e_na") + self.params.v1_2_m = kwargs.pop("v1_2_m") + self.params.k_m = kwargs.pop("k_m") + self.params.v1_2_h = kwargs.pop("v1_2_h") + self.params.k_h = kwargs.pop("k_h") + self.params.v1_2_t = kwargs.pop("v1_2_t") + self.params.k_t = kwargs.pop("k_t") + self.params.g_leak = kwargs.pop("g_leak") + self.params.e_leak = kwargs.pop("e_leak") + self.params.tau_0 = kwargs.pop("tau_0") + self.params.tau_max = kwargs.pop("tau_max") + self.params.v_max = kwargs.pop("v_max") + self.params.v_thr = kwargs.pop("v_thr") + self.params.g_syn_e = kwargs.pop("g_syn_e") + self.params.g_syn_i = kwargs.pop("g_syn_i") + self.params.e_syn_e = kwargs.pop("e_syn_e") + self.params.e_syn_i = kwargs.pop("e_syn_i") + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef li_nap_danner_params_t params = ( self._node.params)[0] + return params diff --git a/farms_network/models/linear.py b/farms_network/models/linear.py new file mode 100644 index 0000000..0805b5b --- /dev/null +++ b/farms_network/models/linear.py @@ -0,0 +1,16 @@ +""" Linear """ + + +from farms_network.core.options import LinearNodeOptions +from farms_network.models.linear_cy import LinearNodeCy +from farms_network.core.node import Node +from farms_network.models import Models + + +class LinearNode(Node): + """ Linear node Cy """ + + CY_NODE_CLASS = LinearNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.LINEAR, **kwargs) diff --git a/farms_network/models/linear_cy.pxd b/farms_network/models/linear_cy.pxd new file mode 100644 index 0000000..d574c6d --- /dev/null +++ b/farms_network/models/linear_cy.pxd @@ -0,0 +1,51 @@ +""" Linear model """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport 
edge_t
+
+
+cdef enum:
+    #STATES
+    NSTATES = 0
+
+
+cdef packed struct linear_params_t:
+    double slope
+    double bias
+
+
+cdef void linear_input_tf(
+    double time,
+    const double* states,
+    const node_inputs_t inputs,
+    const node_t* node,
+    const edge_t** edges,
+    processed_inputs_t* out
+) noexcept
+
+
+cdef void linear_ode(
+    double time,
+    const double* states,
+    double* derivatives,
+    processed_inputs_t input_val,
+    double noise,
+    const node_t* node,
+) noexcept
+
+
+cdef double linear_output_tf(
+    double time,
+    const double* states,
+    processed_inputs_t input_val,
+    double noise,
+    const node_t* node,
+) noexcept
+
+
+cdef class LinearNodeCy(NodeCy):
+    """ Python interface to Linear Node C-Structure """
+
+    cdef:
+        linear_params_t params
diff --git a/farms_network/models/linear_cy.pyx b/farms_network/models/linear_cy.pyx
new file mode 100644
index 0000000..7f5794a
--- /dev/null
+++ b/farms_network/models/linear_cy.pyx
@@ -0,0 +1,109 @@
+""" Linear model """
+
+from libc.stdio cimport printf
+from libc.stdlib cimport free
+
+
+cpdef enum STATE:
+    #STATES
+    nstates = NSTATES
+
+
+cdef void linear_input_tf(
+    double time,
+    const double* states,
+    const node_inputs_t inputs,
+    const node_t* node,
+    const edge_t** edges,
+    processed_inputs_t* out
+) noexcept:
+    cdef linear_params_t* params = (<linear_params_t*> node[0].params)
+
+    cdef:
+        double _sum = 0.0
+        unsigned int j, ninputs
+        double _input, _weight
+
+    ninputs = inputs.ninputs
+
+    for j in range(ninputs):
+        _input = inputs.network_outputs[inputs.node_indices[j]]
+        _weight = inputs.weights[j]
+        out.generic += _weight*_input
+
+
+cdef void linear_ode(
+    double time,
+    const double* states,
+    double* derivatives,
+    processed_inputs_t input_vals,
+    double noise,
+    const node_t* node,
+) noexcept:
+    raise NotImplementedError("ode must be implemented by node type")
+
+
+cdef double linear_output_tf(
+    double time,
+    const double* states,
+    processed_inputs_t input_vals,
+    double noise,
+    const node_t* node,
+) noexcept:
+    cdef linear_params_t* params = (<linear_params_t*> node[0].params)
+    cdef double input_val = input_vals.generic
+    cdef double res = params.slope*input_val + params.bias
+    return res
+
+
+cdef class LinearNodeCy(NodeCy):
+    """ Python interface to Linear Node C-Structure """
+
+    def __cinit__(self):
+        # override default ode and out methods
+        self._node.nstates = 0
+        self._node.nparams = 3
+
+        self._node.is_statefull = False
+        self._node.input_tf = linear_input_tf
+        self._node.output_tf = linear_output_tf
+        # parameters
+        self.params = linear_params_t()
+        self._node.params = &self.params
+        if self._node.params is NULL:
+            raise MemoryError("Failed to allocate memory for node parameters")
+
+    def __init__(self, **kwargs):
+        super().__init__()
+
+        # Set node parameters
+        self.params.slope = kwargs.pop("slope")
+        self.params.bias = kwargs.pop("bias")
+        if kwargs:
+            raise Exception(f'Unknown kwargs: {kwargs}')
+
+    @property
+    def slope(self):
+        """ Slope property """
+        return (<linear_params_t*> self._node.params)[0].slope
+
+    @slope.setter
+    def slope(self, value):
+        """ Set slope """
+        (<linear_params_t*> self._node.params)[0].slope = value
+
+    @property
+    def bias(self):
+        """ Bias property """
+        return (<linear_params_t*> self._node.params)[0].bias
+
+    @bias.setter
+    def bias(self, value):
+        """ Set bias """
+        (<linear_params_t*> self._node.params)[0].bias = value
+
+    @property
+    def parameters(self):
+        """ Parameters in the network """
+        cdef linear_params_t params = (<linear_params_t*> self._node.params)[0]
+        return params
diff --git a/farms_network/models/matsuoka_cy.pxd b/farms_network/models/matsuoka_cy.pxd
new file mode 100644
index
0000000..74fcba1 --- /dev/null +++ b/farms_network/models/matsuoka_cy.pxd @@ -0,0 +1,58 @@ +""" Matsuoka Neuron model """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t, EdgeCy + + +cdef enum: + #STATES + NSTATES = 2 + STATE_V = 0 + STATE_W= 1 + + +cdef packed struct matsuoka_params_t: + + double c # + double b # + double tau # + double T # + double theta # + double nu # + + +cdef void matsuoka_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void matsuoka_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef double matsuoka_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef class MatsuokaNodeCy(NodeCy): + """ Python interface to Matsuoka Node C-Structure """ + + cdef: + matsuoka_params_t params diff --git a/farms_network/models/matsuoka_cy.pyx b/farms_network/models/matsuoka_cy.pyx new file mode 100644 index 0000000..412bcb2 --- /dev/null +++ b/farms_network/models/matsuoka_cy.pyx @@ -0,0 +1,64 @@ +""" Matsuoka Neuron model """ + +from libc.stdio cimport printf + + +cpdef enum STATE: + #STATES + nstates = NSTATES + v = STATE_V + w = STATE_W + + +cdef void matsuoka_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + # Parameters + cdef matsuoka_params_t params = ( node[0].params)[0] + + # States + cdef double state_v = states[STATE.v] + cdef double state_w = states[STATE.w] + + cdef processed_inputs_t processed_inputs = { + 'generic': 0.0, + 'excitatory': 0.0, + 'inhibitory': 0.0, + 'cholinergic': 0.0, + 'phase_coupling': 0.0 + } + + cdef: + double _sum = 0.0 + unsigned int j + double _input, _weight + + for j in range(inputs.ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + + +cdef void matsuoka_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + pass + + +cdef double matsuoka_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + pass diff --git a/farms_network/models/morphed_oscillator_cy.pxd b/farms_network/models/morphed_oscillator_cy.pxd new file mode 100644 index 0000000..5375e1d --- /dev/null +++ b/farms_network/models/morphed_oscillator_cy.pxd @@ -0,0 +1,73 @@ +""" +----------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+----------------------------------------------------------------------- + +Morphed Oscillator model +""" + + +# from ..core.node cimport Node, PyNode + + +# cdef enum: + +# #STATES +# NSTATES = 3 +# STATE_THETA = 0 +# STATE_R= 1 +# # Morphing function state +# STATE_F = 2 + + +# cdef packed struct MorphedOscillatorNodeParameters: + +# double f +# double gamma +# double mu +# double zeta + + +# cdef: +# void ode( +# double time, +# double* states, +# double* derivatives, +# double external_input, +# double* network_outputs, +# unsigned int* inputs, +# double* weights, +# double noise, +# Node* node, +# Edge** edges, +# ) noexcept +# double output( +# double time, +# double* states, +# double external_input, +# double* network_outputs, +# unsigned int* inputs, +# double* weights, +# Node* node, +# Edge** edges, +# ) noexcept + + +# cdef class PyMorphedOscillatorNode(PyNode): +# """ Python interface to MorphedOscillator Node C-Structure """ + +# cdef: +# MorphedOscillatorNodeParameters parameters diff --git a/farms_network/models/morphed_oscillator_cy.pyx b/farms_network/models/morphed_oscillator_cy.pyx new file mode 100644 index 0000000..7bab29a --- /dev/null +++ b/farms_network/models/morphed_oscillator_cy.pyx @@ -0,0 +1,124 @@ +""" +----------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+----------------------------------------------------------------------- + +Morphed Oscillator model +""" + + +# from libc.stdio cimport printf +# from libc.stdlib cimport malloc +# from libc.string cimport strdup + + +# cpdef enum STATE: + +# #STATES +# nstates = NSTATES +# theta = STATE_THETA +# r = STATE_R +# # Morphing function state +# f = STATE_F + + +# cdef void ode( +# double time, +# double* states, +# double* derivatives, +# double external_input, +# double* network_outputs, +# unsigned int* inputs, +# double* weights, +# double noise, +# Node* node, +# Edge** edges, +# ) noexcept: +# """ ODE """ +# # Parameters +# cdef MorphedOscillatorNodeParameters params = ( node[0].parameters)[0] + +# # States +# cdef double state_x = states[STATE.x] +# cdef double state_y = states[STATE.y] + +# cdef: +# double _sum = 0.0 +# unsigned int j +# double _input, _weight + +# cdef unsigned int ninputs = node.ninputs +# for j in range(ninputs): +# _input = network_outputs[inputs[j]] +# _weight = weights[j] +# _sum += (_weight*_input) + +# r_square = (state_x**2 + state_y**2) +# # xdot : x_dot +# derivatives[STATE.x] = ( +# params.alpha*(params.mu - r_square)*state_x - params.omega*state_y +# ) +# # ydot : y_dot +# derivatives[STATE.y] = ( +# params.beta*(params.mu - r_square)*state_y + params.omega*state_x + (_sum) +# ) + + +# cdef double output( +# double time, +# double* states, +# double external_input, +# double* network_outputs, +# unsigned int* inputs, +# double* weights, +# Node* node, +# Edge** edges, +# ) noexcept: +# """ Node output. """ +# return states[STATE.y] + + +# cdef class PyMorphedOscillatorNode(PyNode): +# """ Python interface to MorphedOscillator Node C-Structure """ + +# def __cinit__(self): +# self.node.model_type = strdup("MORPHED_OSCILLATOR".encode('UTF-8')) +# # override default ode and out methods +# self.node.is_statefull = True +# self.node.ode = ode +# self.node.output = output +# # parameters +# self.node.parameters = malloc(sizeof(MorphedOscillatorNodeParameters)) +# if self.node.parameters is NULL: +# raise MemoryError("Failed to allocate memory for node parameters") + +# def __init__(self, name: str, **kwargs): +# super().__init__(name) + +# # Set node parameters +# cdef MorphedOscillatorNodeParameters* params = (self.node.parameters) +# params.f = kwargs.pop("f") +# params.gamme = kwargs.pop("gamme") +# params.mu = kwargs.pop("mu") +# params.zeta = kwargs.pop("zeta") +# if kwargs: +# raise Exception(f'Unknown kwargs: {kwargs}') + +# @property +# def parameters(self): +# """ Parameters in the network """ +# cdef MorphedOscillatorNodeParameters params = ( self.node.parameters)[0] +# return params diff --git a/farms_network/network_generator.pxd b/farms_network/models/morris_lecar_cy.pxd similarity index 62% rename from farms_network/network_generator.pxd rename to farms_network/models/morris_lecar_cy.pxd index 4d2a043..7fe32ef 100644 --- a/farms_network/network_generator.pxd +++ b/farms_network/models/morris_lecar_cy.pxd @@ -15,26 +15,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
----------------------------------------------------------------------- -""" -from farms_network.leaky_integrator cimport LeakyIntegrator -from farms_container.table cimport Table -from farms_network.neuron cimport Neuron - -cimport numpy - -cdef class NetworkGenerator(object): - cdef: - dict __dict__ - Neuron[:] c_neurons - Table states - Table dstates - Table constants - Table inputs - Table weights - Table parameters - Table outputs - - unsigned int num_neurons - - cpdef double[:] ode(self, double t, double[:] state) +Morris Lecar Neuron model. +""" diff --git a/farms_network/models/morris_lecar_cy.pyx b/farms_network/models/morris_lecar_cy.pyx new file mode 100644 index 0000000..da9f621 --- /dev/null +++ b/farms_network/models/morris_lecar_cy.pyx @@ -0,0 +1 @@ +""" Morris Lecar Neuron model. """ diff --git a/farms_network/models/oscillator.py b/farms_network/models/oscillator.py new file mode 100644 index 0000000..45a351f --- /dev/null +++ b/farms_network/models/oscillator.py @@ -0,0 +1,26 @@ +from farms_network.core.node import Node +from farms_network.core.edge import Edge +from farms_network.models import Models +from farms_network.core.options import OscillatorNodeOptions +from farms_network.models.oscillator_cy import OscillatorNodeCy +from farms_network.models.oscillator_cy import OscillatorEdgeCy + + +class OscillatorNode(Node): + + CY_NODE_CLASS = OscillatorNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.OSCILLATOR, **kwargs) + + # Oscillator-specific properties + + +class OscillatorEdge(Edge): + + CY_EDGE_CLASS = OscillatorEdgeCy + + def __init__(self, source, target, edge_type, model=Models.OSCILLATOR, **kwargs): + super().__init__( + source=source, target=target, edge_type=edge_type, model=Models.OSCILLATOR, **kwargs + ) diff --git a/farms_network/models/oscillator_cy.pxd b/farms_network/models/oscillator_cy.pxd new file mode 100644 index 0000000..1f98034 --- /dev/null +++ b/farms_network/models/oscillator_cy.pxd @@ -0,0 +1,68 @@ +""" Oscillator model """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t, EdgeCy + + +cdef enum: + #STATES + NSTATES = 3 + STATE_PHASE = 0 + STATE_AMPLITUDE= 1 + STATE_AMPLITUDE_0 = 2 + + +cdef packed struct oscillator_params_t: + + double intrinsic_frequency # Hz + double nominal_amplitude # + double amplitude_rate # + + +cdef packed struct oscillator_edge_params_t: + + double phase_difference # radians + + +cdef void oscillator_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void oscillator_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef double oscillator_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept + + +cdef class OscillatorNodeCy(NodeCy): + """ Python interface to Oscillator Node C-Structure """ + + cdef: + oscillator_params_t params + + +cdef class OscillatorEdgeCy(EdgeCy): + """ Python interface to Oscillator Edge C-Structure """ + + cdef: + oscillator_edge_params_t params diff --git a/farms_network/models/oscillator_cy.pyx b/farms_network/models/oscillator_cy.pyx new file mode 100644 index 0000000..1e89a13 --- /dev/null +++ b/farms_network/models/oscillator_cy.pyx @@ -0,0 +1,147 @@ +""" Oscillator model """ 
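+# Overview (summary of the functions defined below): this node implements an
+# amplitude-controlled phase oscillator. With the states and parameters
+# declared in oscillator_cy.pxd, the dynamics integrated in oscillator_ode() are
+#     d(phase)/dt       = 2*pi*intrinsic_frequency + coupling
+#     d(amplitude)/dt   = amplitude_0
+#     d(amplitude_0)/dt = amplitude_rate*((amplitude_rate/4)*(nominal_amplitude - amplitude) - amplitude_0)
+# where the coupling term accumulated in oscillator_input_tf() is
+#     sum_j weight_j * amplitude * sin(output_j - phase - phase_difference_j)
+# and oscillator_output_tf() returns the phase state.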
+ + +from libc.math cimport M_PI +from libc.math cimport sin as csin +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc +from libc.string cimport strdup + + +cpdef enum STATE: + + #STATES + nstates = NSTATES + phase = STATE_PHASE + amplitude = STATE_AMPLITUDE + amplitude_0 = STATE_AMPLITUDE_0 + + +cdef void oscillator_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + + # Parameters + cdef oscillator_params_t* params = ( node[0].params) + cdef oscillator_edge_params_t edge_params + + # States + cdef double state_phase = states[STATE.phase] + cdef double state_amplitude = states[STATE.amplitude] + + cdef: + double _sum = 0.0 + unsigned int j + double _input, _weight + unsigned int ninputs = inputs.ninputs + + for j in range(ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + edge_params = ( edges[inputs.edge_indices[j]].params)[0] + out.generic += _weight*state_amplitude*csin(_input - state_phase - edge_params.phase_difference) + + +cdef void oscillator_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + + # Parameters + cdef oscillator_params_t* params = ( node[0].params) + cdef oscillator_edge_params_t edge_params + + # States + cdef double state_phase = states[STATE.phase] + cdef double state_amplitude = states[STATE.amplitude] + cdef double state_amplitude_0 = states[STATE.amplitude_0] + + cdef double input_val = input_vals.generic + + # phidot : phase_dot + derivatives[STATE.phase] = 2*M_PI*params.intrinsic_frequency + input_val + # ampdot + derivatives[STATE.amplitude] = state_amplitude_0 + derivatives[STATE.amplitude_0] = params.amplitude_rate*( + (params.amplitude_rate/4.0)*(params.nominal_amplitude - state_amplitude) - state_amplitude_0 + ) + + +cdef double oscillator_output_tf( + double time, + const double* states, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept: + return states[STATE.phase] + + +cdef class OscillatorNodeCy(NodeCy): + """ Python interface to Oscillator Node C-Structure """ + + def __cinit__(self): + # override default ode and out methods + self._node.nstates = 3 + self._node.nparams = 3 + + self._node.is_statefull = True + self._node.input_tf = oscillator_input_tf + self._node.ode = oscillator_ode + self._node.output_tf = oscillator_output_tf + # parameters + self.params = oscillator_params_t() + self._node.params = &self.params + if self._node.params is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, **kwargs): + super().__init__() + + # Set node parameters + self.params.intrinsic_frequency = kwargs.pop("intrinsic_frequency") + self.params.nominal_amplitude = kwargs.pop("nominal_amplitude") + self.params.amplitude_rate = kwargs.pop("amplitude_rate") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef oscillator_params_t params = ( self._node.params)[0] + return params + + +cdef class OscillatorEdgeCy(EdgeCy): + """ Python interface to Oscillator Edge C-Structure """ + + def __cinit__(self, edge_type: str, **kwargs): + # parameters + self.params = oscillator_edge_params_t() + self._edge.params = &self.params + self._edge.nparams = 1 + if self._edge.params is NULL: + raise MemoryError("Failed to allocate memory for 
edge parameters") + + def __init__(self, edge_type: str, **kwargs): + super().__init__(edge_type) + + # Set edge parameters + self.params.phase_difference = kwargs.pop("phase_difference") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def parameters(self): + """ Parameters in the network """ + cdef oscillator_edge_params_t params = ( self._edge.params)[0] + return params diff --git a/farms_network/models/relay.py b/farms_network/models/relay.py new file mode 100644 index 0000000..7df98b8 --- /dev/null +++ b/farms_network/models/relay.py @@ -0,0 +1,16 @@ +""" Relay """ + + +from farms_network.core.options import RelayNodeOptions +from farms_network.models.relay_cy import RelayNodeCy +from farms_network.core.node import Node +from farms_network.models import Models + + +class RelayNode(Node): + """ Relay node Cy """ + + CY_NODE_CLASS = RelayNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.RELAY, **kwargs) diff --git a/farms_network/models/relay_cy.pxd b/farms_network/models/relay_cy.pxd new file mode 100644 index 0000000..1ed2122 --- /dev/null +++ b/farms_network/models/relay_cy.pxd @@ -0,0 +1,43 @@ +""" Relay model """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t + + +cdef enum: + #STATES + NSTATES = 0 + + +cdef void relay_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void relay_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept + + +cdef double relay_output_tf( + double time, + const double* states, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept + + +cdef class RelayNodeCy(NodeCy): + """ Python interface to External Relay Node C-Structure """ diff --git a/farms_network/models/relay_cy.pyx b/farms_network/models/relay_cy.pyx new file mode 100644 index 0000000..40028e6 --- /dev/null +++ b/farms_network/models/relay_cy.pyx @@ -0,0 +1,60 @@ +""" Relay model """ + +from libc.stdio cimport printf + + +cpdef enum STATE: + #STATES + nstates = NSTATES + + +cdef void relay_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + + out.generic = inputs.external_input + + +cdef void relay_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + pass + + +cdef double relay_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + return input_vals.generic + + +cdef class RelayNodeCy(NodeCy): + """ Python interface to Relay Node C-Structure """ + + def __cinit__(self): + # override default ode and out methods + self._node.nstates = 0 + self._node.nparams = 0 + + self._node.is_statefull = False + self._node.input_tf = relay_input_tf + self._node.output_tf = relay_output_tf + + def __init__(self, **kwargs): + super().__init__() + + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') diff --git a/farms_network/models/relu.py b/farms_network/models/relu.py new file mode 100644 index 0000000..2744085 --- /dev/null +++ b/farms_network/models/relu.py @@ -0,0 +1,39 @@ +""" ReLU """ + +from farms_network.core.node import Node 
+from farms_network.models import Models +from farms_network.core.options import ReLUNodeOptions +from farms_network.models.relu_cy import ReLUNodeCy + + +class ReLUNode(Node): + + CY_NODE_CLASS = ReLUNodeCy + + def __init__(self, name: str, **kwargs): + super().__init__(name=name, model=Models.RELU, **kwargs) + + # ReLU-specific properties + @property + def gain(self): + return self._node_cy.gain + + @gain.setter + def gain(self, value): + self._node_cy.gain = value + + @property + def sign(self): + return self._node_cy.sign + + @sign.setter + def sign(self, value): + self._node_cy.sign = value + + @property + def offset(self): + return self._node_cy.offset + + @offset.setter + def offset(self, value): + self._node_cy.offset = value diff --git a/farms_network/models/relu_cy.pxd b/farms_network/models/relu_cy.pxd new file mode 100644 index 0000000..e8826dd --- /dev/null +++ b/farms_network/models/relu_cy.pxd @@ -0,0 +1,52 @@ +""" Rectified Linear Unit """ + + +from ..core.node_cy cimport node_t, node_inputs_t, processed_inputs_t, NodeCy +from ..core.edge_cy cimport edge_t + + +cdef enum: + #STATES + NSTATES = 0 + + +cdef packed struct relu_params_t: + double gain + double sign + double offset + + +cdef void relu_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept + + +cdef void relu_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept + + +cdef double relu_output_tf( + double time, + const double* states, + processed_inputs_t input_val, + double noise, + const node_t* node, +) noexcept + + +cdef class ReLUNodeCy(NodeCy): + """ Python interface to ReLU Node C-Structure """ + + cdef: + relu_params_t params diff --git a/farms_network/models/relu_cy.pyx b/farms_network/models/relu_cy.pyx new file mode 100644 index 0000000..1fceb0f --- /dev/null +++ b/farms_network/models/relu_cy.pyx @@ -0,0 +1,102 @@ +""" Rectified Linear Unit """ + + +from libc.stdio cimport printf +from libc.stdlib cimport free + + +cpdef enum STATE: + + #STATES + nstates = NSTATES + + +cdef void relu_input_tf( + double time, + const double* states, + const node_inputs_t inputs, + const node_t* node, + const edge_t** edges, + processed_inputs_t* out +) noexcept: + cdef relu_params_t* params = ( node[0].params) + + cdef: + double _sum = 0.0 + unsigned int j, ninputs + double _input, _weight + + ninputs = inputs.ninputs + + for j in range(ninputs): + _input = inputs.network_outputs[inputs.node_indices[j]] + _weight = inputs.weights[j] + out.generic += _weight*_input + + +cdef void relu_ode( + double time, + const double* states, + double* derivatives, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + raise NotImplementedError("ode must be implemented by node type") + + +cdef double relu_output_tf( + double time, + const double* states, + processed_inputs_t input_vals, + double noise, + const node_t* node, +) noexcept: + cdef relu_params_t* params = ( node[0].params) + cdef double input_val = input_vals.generic + cdef double res = max(0.0, params.gain*(params.sign*input_val + params.offset)) + return res + + +cdef class ReLUNodeCy(NodeCy): + """ Python interface to ReLU Node C-Structure """ + + def __cinit__(self): + # override default ode and out methods + self._node.nstates = 0 + self._node.nparams = 3 + + self._node.is_statefull = False + self._node.input_tf = relu_input_tf + 
self._node.output_tf = relu_output_tf + # parameters + self.params = relu_params_t() + self._node.params = &self.params + if self._node.params is NULL: + raise MemoryError("Failed to allocate memory for node parameters") + + def __init__(self, **kwargs): + super().__init__() + + # Set node parameters + self.params.gain = kwargs.pop("gain") + self.params.sign = kwargs.pop("sign") + self.params.offset = kwargs.pop("offset") + if kwargs: + raise Exception(f'Unknown kwargs: {kwargs}') + + @property + def gain(self): + """ Gain property """ + return ( self._node.params)[0].gain + + @gain.setter + def gain(self, value): + """ Set gain """ + ( self._node.params)[0].gain = value + + @property + def parameters(self): + """ Parameters in the network """ + cdef relu_params_t params = ( self._node.params)[0] + return params diff --git a/farms_network/morphed_oscillator.pxd b/farms_network/morphed_oscillator.pxd deleted file mode 100644 index f2f611c..0000000 --- a/farms_network/morphed_oscillator.pxd +++ /dev/null @@ -1,69 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Oscillator model. -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct MorphedOscillatorNeuronInput: - int neuron_idx - int weight_idx - int phi_idx - -cdef class MorphedOscillator(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double f - double gamma # : Gamma - double mu # : Mu - double zeta # : Zeta - - # Morphing function - Parameter f_theta - Parameter fd_theta - - # states - Parameter theta - Parameter r - - # inputs - Parameter ext_in - - # ode - Parameter theta_dot - Parameter r_dot - - # Ouputs - Parameter nout - - # neuron connenctions - MorphedOscillatorNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _theta, - double _phi) diff --git a/farms_network/morphed_oscillator.pyx b/farms_network/morphed_oscillator.pyx deleted file mode 100644 index ea6e3c4..0000000 --- a/farms_network/morphed_oscillator.pyx +++ /dev/null @@ -1,177 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Morphed Oscillator model -""" -from libc.stdio cimport printf -import farms_pylog as pylog -from libc.math cimport exp -from libc.math cimport M_PI -from libc.math cimport sin as csin -import numpy as np -cimport numpy as cnp - - -cdef class MorphedOscillator(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. - """ - super(MorphedOscillator, self).__init__('leaky') - - # Neuron ID - self.n_id = n_id - - # Initialize parameters - (_, self.f) = neural_container.constants.add_parameter( - 'f_' + self.n_id, kwargs.get('f', 0.5)) - - (_, self.gamma) = neural_container.constants.add_parameter( - 'g_' + self.n_id, kwargs.get('gamma', 100)) - - (_, self.mu) = neural_container.constants.add_parameter( - 'mu_' + self.n_id, kwargs.get('mu', 1.0)) - - (_, self.zeta) = neural_container.constants.add_parameter( - 'z_' + self.n_id, kwargs.get('zeta', 0.0)) - print(self.zeta) - # Initialize states - self.theta = neural_container.states.add_parameter( - 'theta_' + self.n_id, kwargs.get('theta0', 0.0))[0] - self.r = neural_container.states.add_parameter( - 'r_' + self.n_id, kwargs.get('r0', 0.0))[0] - - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # Morphing function - self.f_theta = neural_container.parameters.add_parameter( - 'f_theta_' + self.n_id, kwargs.get('f_theta0', 0.0))[0] - self.fd_theta = neural_container.parameters.add_parameter( - 'fd_theta_' + self.n_id, kwargs.get('fd_theta0', 0.0))[0] - - # ODE RHS - self.theta_dot = neural_container.dstates.add_parameter( - 'theta_dot_' + self.n_id, 0.0)[0] - self.r_dot = neural_container.dstates.add_parameter( - 'r_dot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i'), - ('theta_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef MorphedOscillatorNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('weight', 0.0))[0] - phi = neural_container.parameters.add_parameter( - 'phi_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('phi', 0.0))[0] - - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - phi_idx = neural_container.parameters.get_parameter_index( - 'phi_' + neuron.n_id + '_to_' + self.n_id) - - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - n.phi_idx = phi_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - 
"""Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # Current state - cdef double _theta = self.theta.c_get_value() - cdef double _r = self.r.c_get_value() - cdef double f_theta = self.f_theta.c_get_value() - cdef double fd_theta = self.fd_theta.c_get_value() - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - cdef double _phi - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _phi = _p[self.neuron_inputs[j].phi_idx] - _sum += self.c_neuron_inputs_eval( - _neuron_out, _weight, _theta, _phi) - - # thetadot : theta_dot - self.theta_dot.c_set_value(2*M_PI*self.f + _sum) - - # rdot - # cdef double r_dot_1 = 2*M_PI*self.f*_r*(fd_theta/f_theta) - # cdef double r_dot_2 = _r*self.gamma*(self.mu - ((_r*_r)/(f_theta*f_theta))) - # self.r_dot.c_set_value(r_dot_1 + r_dot_2 + self.zeta) - - cdef double r_dot_1 = fd_theta*self.theta_dot.c_get_value() - cdef double r_dot_2 = self.gamma*(f_theta - _r) - self.r_dot.c_set_value(r_dot_1 + r_dot_2 + self.zeta) - - cdef void c_output(self): - """ Neuron output. """ - self.nout.c_set_value(self.theta.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _theta, - double _phi): - """ Evaluate neuron inputs.""" - return _weight*csin(_neuron_out - _theta - _phi) diff --git a/farms_network/morris_lecar.pxd b/farms_network/morris_lecar.pxd deleted file mode 100644 index 164bc2e..0000000 --- a/farms_network/morris_lecar.pxd +++ /dev/null @@ -1,75 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Morris Lecar Neuron model. 
-""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct MLNeuronInput: - int neuron_idx - int weight_idx - int phi_idx - -cdef class MorrisLecarNeuron(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - double internal_curr - double C - double g_fast - double g_slow - double g_leak - double E_fast - double E_slow - double E_leak - double phi_w - double beta_m - double beta_w - double gamma_m - double gamma_w - - # states - Parameter V - Parameter w - - # inputs - Parameter ext_in - - # ode - Parameter V_dot - Parameter w_dot - - # Ouputs - Parameter nout - - # neuron connenctions - MLNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w - ) diff --git a/farms_network/morris_lecar.pyx b/farms_network/morris_lecar.pyx deleted file mode 100644 index d2877c2..0000000 --- a/farms_network/morris_lecar.pyx +++ /dev/null @@ -1,185 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Morris Lecar Neuron model -""" -from libc.stdio cimport printf -import farms_pylog as pylog -from libc.math cimport tanh as ctanh -from libc.math cimport cosh as ccosh -import numpy as np -cimport numpy as cnp - - -cdef class MorrisLecarNeuron(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super(MorrisLecarNeuron, self).__init__('morris_lecar') - - # Neuron ID - self.n_id = n_id - - # Initialize parameters - (_, self.internal_curr) = neural_container.constants.add_parameter( - 'I_' + self.n_id, kwargs.get('I', 100.0)) - (_, self.C) = neural_container.constants.add_parameter( - 'C_' + self.n_id, kwargs.get('C', 2.0)) - (_, self.g_fast) = neural_container.constants.add_parameter( - 'g_fast_' + self.n_id, kwargs.get('g_fast', 20.0)) - (_, self.g_slow) = neural_container.constants.add_parameter( - 'g_slow_' + self.n_id, kwargs.get('g_slow', 20.0)) - (_, self.g_leak) = neural_container.constants.add_parameter( - 'g_leak_' + self.n_id, kwargs.get('g_leak', 2.0)) - (_, self.E_fast) = neural_container.constants.add_parameter( - 'E_fast_' + self.n_id, kwargs.get('E_fast', 50.0)) - (_, self.E_slow) = neural_container.constants.add_parameter( - 'E_slow_' + self.n_id, kwargs.get('E_slow', -100.0)) - (_, self.E_leak) = neural_container.constants.add_parameter( - 'E_leak_' + self.n_id, kwargs.get('E_leak', -70.0)) - (_, self.phi_w) = neural_container.constants.add_parameter( - 'phi_w_' + self.n_id, kwargs.get('phi_w', 0.15)) - (_, self.beta_m) = neural_container.constants.add_parameter( - 'beta_m_' + self.n_id, kwargs.get('beta_m', 0.0)) - (_, self.gamma_m) = neural_container.constants.add_parameter( - 'gamma_m_' + self.n_id, kwargs.get('gamma_m', 18.0)) - (_, self.beta_w) = neural_container.constants.add_parameter( - 'beta_w_' + self.n_id, kwargs.get('beta_w', -10.0)) - (_, self.gamma_w) = neural_container.constants.add_parameter( - 'gamma_w_' + self.n_id, kwargs.get('gamma_w', 13.0)) - - # Initialize states - self.V = neural_container.states.add_parameter( - 'V_' + self.n_id, kwargs.get('V0', 0.0))[0] - self.w = neural_container.states.add_parameter( - 'w_' + self.n_id, kwargs.get('w0', 0.0))[0] - - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.V_dot = neural_container.dstates.add_parameter( - 'V_dot_' + self.n_id, 0.0)[0] - self.w_dot = neural_container.dstates.add_parameter( - 'w_dot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i'), - ('phi_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef MLNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('weight', 0.0))[0] - phi = neural_container.parameters.add_parameter( - 'phi_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('phi', 0.0))[0] - - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - phi_idx = neural_container.parameters.get_parameter_index( - 'phi_' + neuron.n_id + '_to_' + self.n_id) - - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - n.phi_idx = phi_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. 
- Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # Current state - cdef double _V = self.V.c_get_value() - cdef double _W = self.w.c_get_value() - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - cdef double _phi - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _phi = _p[self.neuron_inputs[j].phi_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, - _weight, _phi, _V, _W) - - cdef double m_inf_V = 0.5*(1.0+ctanh((_V-self.beta_m)/self.gamma_m)) - - cdef double w_inf_V = 0.5*(1.0+ctanh((_V-self.beta_w)/self.gamma_w)) - - cdef double tau_w_V = (1./ccosh((_V-self.beta_w)/(2*self.gamma_w))) - - # V_dot - self.V_dot.c_set_value((1.0/self.C)*(self.internal_curr - self.g_fast*m_inf_V*(_V-self.E_fast) - - self.g_slow*_W*(_V - self.E_slow) - self.g_leak*(_V - self.E_leak))) - - # wdot - self.w_dot.c_set_value(self.phi_w*(w_inf_V - _W)/tau_w_V) - - cdef void c_output(self): - """ Neuron output. """ - self.nout.c_set_value(self.V.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _V, double _w): - """ Evaluate neuron inputs.""" - # Linear coupling term in potential - return _weight*(_neuron_out - _V) diff --git a/farms_network/network_generator.pyx b/farms_network/network_generator.pyx deleted file mode 100644 index 18c0e71..0000000 --- a/farms_network/network_generator.pyx +++ /dev/null @@ -1,136 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Generate neural network. -""" -from farms_container.table cimport Table -from libc.stdio cimport printf - -from farms_network.neuron cimport Neuron - -from collections import OrderedDict - -import farms_pylog as pylog -import numpy as np -from cython.parallel import prange - -from farms_network.neuron_factory import NeuronFactory - -cimport cython -cimport numpy as cnp - - -cdef class NetworkGenerator: - """ Generate Neural Network. - """ - - def __init__(self, graph, neural_container): - """Initialize. - - Parameters - ---------- - graph_file_path: - File path to the graphml structure. - """ - super(NetworkGenerator, self).__init__() - - # Attributes - self.neurons = OrderedDict() # Neurons in the network - self.states = neural_container.add_table('states') - self.dstates =
neural_container.add_table('dstates')
-        self.constants = neural_container.add_table('constants', table_type='CONSTANT')
-        self.inputs = neural_container.add_table('inputs')
-        self.weights = neural_container.add_table('weights', table_type='CONSTANT')
-        self.parameters = neural_container.add_table('parameters', table_type='CONSTANT')
-        self.outputs =
neural_container.add_table('outputs') - - self.odes = [] - - self.fin = {} - self.integrator = {} - - # Read the graph - self.graph = graph - - # Get the number of neurons in the model - self.num_neurons = len(self.graph) - - self.c_neurons = np.ndarray((self.num_neurons,), dtype=Neuron) - self.generate_neurons(neural_container) - self.generate_network(neural_container) - - def generate_neurons(self, neural_container): - """Generate the complete neural network. - Instatiate a neuron model for each node in the graph - - Returns - ------- - out : - Return true if successfully created the neurons - """ - cdef int j - for j, (name, neuron) in enumerate(sorted(self.graph.nodes.items())): - # Add neuron to list - pylog.debug( - 'Generating neuron model : {} of type {}'.format( - name, neuron['model'])) - # Generate Neuron Models - _neuron = NeuronFactory.gen_neuron(neuron['model']) - self.neurons[name] = _neuron( - name, self.graph.in_degree(name), - neural_container, - **neuron - ) - self.c_neurons[j] = self.neurons[name] - - def generate_network(self, neural_container): - """ - Generate the network. - """ - for name, neuron in list(self.neurons.items()): - pylog.debug( - 'Establishing neuron {} network connections'.format( - name)) - for j, pred in enumerate(self.graph.predecessors(name)): - pylog.debug(('{} -> {}'.format(pred, name))) - # Set the weight of the parameter - neuron.add_ode_input( - j, - self.neurons[pred], - neural_container, - **self.graph[pred][name]) - - #################### C-FUNCTIONS #################### - cpdef double[:] ode(self, double t, double[:] state): - self.states.c_set_values(state) - cdef unsigned int j - cdef Neuron neuron - - cdef double[:] outputs = self.outputs.c_get_values() - cdef double[:] weights = self.weights.c_get_values() - cdef double[:] parameters = self.parameters.c_get_values() - - for j in range(self.num_neurons): - neuron = self.c_neurons[j] - neuron.c_output() - - for j in range(self.num_neurons): - neuron = self.c_neurons[j] - neuron.c_ode_rhs(outputs, weights, parameters) - - return self.dstates.c_get_values() diff --git a/farms_network/networkx_model.py b/farms_network/networkx_model.py deleted file mode 100644 index e0e87bb..0000000 --- a/farms_network/networkx_model.py +++ /dev/null @@ -1,240 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -This class implements the network of different neurons. -""" - -import os - -import networkx as nx -import numpy as np - -import farms_pylog as pylog - - -class NetworkXModel(object): - """Generate Network based on graphml format. - """ - - def __init__(self): - """ Initialize. 
""" - super(NetworkXModel, self).__init__() - self.graph = None # NetworkX graph - self.pos = {} # Neuron positions - self.edge_pos = {} - self.color_map = [] # Neuron color map - self.color_map_arr = [] # Neuron color map - self.color_map_edge = [] # Neuron edge color map - self.alpha_edge = [] # Neuron edge alpha - self.edge_style = [] # Arrow edge style - self.net_matrix = None - - def read_graph(self, path): - """Read graph from the file path. - Parameters - ---------- - path : - File path of the graph - - Returns - ------- - out : - Graph object created by Networkx - """ - self.graph = nx.read_graphml(path) - return self.graph - - def network_sparse_matrix(self): - """Show network connectivity matrix.""" - self.net_matrix = nx.to_scipy_sparse_matrix(self.graph) - self.net_matrix = self.net_matrix.todense() - return self.net_matrix - - def show_network_sparse_matrix(self): - """Show network connectivity matrix.""" - pylog.info('Showing network connectivity matrix') - pylog.info(self.net_matrix) - - def read_neuron_position_in_graph(self, from_layout=False): - """ Read the positions of neurons. - Only if positions are defined. """ - for _neuron, data in list(self.graph.nodes.items()): - self.pos[_neuron] = (data.get('x', None), - data.get('y', None)) - self.edge_pos[_neuron] = (data.get('x', None), - data.get('y', None)) - check_pos_is_none = None in [ - val for x in list(self.pos.values()) for val in x] - if check_pos_is_none: - pylog.warning('Missing neuron position information.') - # self.pos = nx.kamada_kawai_layout(self.graph) - # self.pos = nx.spring_layout(self.graph) - self.pos = nx.shell_layout(self.graph) - self.edge_pos = self.pos - - def read_neuron_colors_in_graph(self): - """ Read the neuron display colors.""" - import matplotlib.colors as mcolors - for data in list(self.graph.nodes.values()): - self.color_map.extend(data.get('color', 'r')) - self.color_map_arr.append(mcolors.colorConverter.to_rgb( - self.color_map[-1])) - - def read_edge_colors_in_graph(self, edge_attribute='weight'): - """ Read the neuron display colors.""" - max_weight = max(list(dict(self.graph.edges).items()), - key=lambda x: abs(x[1][edge_attribute]), - default=[{edge_attribute: 0.0}])[-1][edge_attribute] - - max_weight = abs(max_weight) - for _, _, attr in self.graph.edges(data=True): - _weight = attr.get(edge_attribute, 0.0) - # pylint: disable=no-member - try: - _weight_ratio = _weight/max_weight - except ZeroDivisionError: - _weight_ratio = 0.0 - - if np.sign(_weight_ratio) == 1: - self.color_map_edge.extend('g') - # pylint: disable=no-member - elif np.sign(_weight_ratio) == -1: - self.color_map_edge.extend('r') - else: - self.color_map_edge.extend('k') - self.alpha_edge.append( - max(np.abs(_weight_ratio), 0.1)) - - def visualize_network(self, - node_size=1500, - node_labels=False, - edge_labels=False, - edge_attribute='weight', - edge_alpha=True, - plt_out=None, - **kwargs - ): - """ Visualize the neural network.""" - self.read_neuron_position_in_graph() - self.read_neuron_colors_in_graph() - if color_map_edge := kwargs.get('color_map_edge'): - self.color_map_edge = color_map_edge - else: - self.read_edge_colors_in_graph(edge_attribute=edge_attribute) - - if plt_out is not None: - fig = plt_out.figure('Network') - plt_out.autoscale(True) - ax = plt_out.gca() - else: - import matplotlib.pyplot as plt - fig = plt.figure('Network') - plt.autoscale(True) - ax = plt.gca() - - # Draw Nodes - _ = nx.draw_networkx_nodes(self.graph, pos=self.pos, - node_color=self.color_map, - node_size=node_size, - 
alpha=kwargs.pop('alpha', 0.25), - edgecolors='k', - linewidths=2.0, - ax=ax - ) - if node_labels: - nx.draw_networkx_labels( - self.graph, - pos=self.pos, - labels={n: val["label"] for n, val in self.graph.nodes.items()}, - font_size=kwargs.pop('font_size', 11.0), - font_weight=kwargs.pop('font_weight', 'bold'), - font_family=kwargs.pop('font_family', 'sans-serif'), - alpha=kwargs.pop('alpha', 1.0), - ax=ax - ) - if edge_labels: - labels = { - ed: round(val, 3) - for ed, val in nx.get_edge_attributes( - self.graph, edge_attribute - ).items() - } - nx.draw_networkx_edge_labels(self.graph, - pos=self.pos, - rotate=False, - edge_labels=labels, - font_size=kwargs.pop( - 'font_size', 6.5), - clip_on=True, - ax=ax) - edges = nx.draw_networkx_edges(self.graph, - pos=self.pos, - node_size=node_size, - edge_color=self.color_map_edge, - width=kwargs.pop('edge_width', 1.), - arrowsize=kwargs.pop('arrow_size', 10), - style=kwargs.pop( - 'edge_style', 'dashed'), - arrows=kwargs.pop('arrows', True), - connectionstyle=kwargs.pop( - 'connection_style', "arc3,rad=-0.0"), - min_source_margin=kwargs.pop( - 'min_source_margin', 5), - min_target_margin=kwargs.pop( - 'min_target_margin', 5), - ax=ax) - if edge_alpha: - for edge in range(self.graph.number_of_edges()): - edges[edge].set_alpha(self.alpha_edge[edge]) - - if plt_out is not None: - plt_out.draw() - plt_out.subplots_adjust( - left=0, right=1, top=1, bottom=0) - plt_out.grid() - ax.invert_yaxis() - plt_out.tight_layout() - else: - # fig.draw() - ax.invert_yaxis() - fig.subplots_adjust( - left=0, right=1, top=1, bottom=0) - ax.grid() - return fig - - def save_network_to_dot(self, name='graph'): - """ Save network file to dot format.""" - from networkx.drawing.nx_pydot import write_dot - write_dot(self.graph, name + '.dot') - try: - os.system('dot -Tpng {0}.dot > {0}.png'.format(name)) - except BaseException: - pylog.error('Command not found') - - -def main(): - """Main. - Test NetworkXModel Reading and Visualization.""" - net_ = NetworkXModel() - net_.read_graph( - './conf/stick_insect_cpg_v1.graphml') - net_.visualize_network() - - -if __name__ == '__main__': - main() diff --git a/farms_network/neural_system.py b/farms_network/neural_system.py deleted file mode 100644 index 68d2f3b..0000000 --- a/farms_network/neural_system.py +++ /dev/null @@ -1,123 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
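For reference, the MorrisLecarNeuron.c_ode_rhs removed above reduces to the standard two-variable Morris-Lecar equations. A minimal NumPy sketch of the same right-hand side, using the default parameter values from the deleted constructor and omitting synaptic coupling (the function name morris_lecar_rhs, the keyword I_ext and the forward-Euler rollout are illustrative, not part of the original API):

import numpy as np


def morris_lecar_rhs(
        V, w, I_ext=100.0, C=2.0, g_fast=20.0, g_slow=20.0, g_leak=2.0,
        E_fast=50.0, E_slow=-100.0, E_leak=-70.0, phi_w=0.15,
        beta_m=0.0, gamma_m=18.0, beta_w=-10.0, gamma_w=13.0,
):
    """ Morris-Lecar right-hand side without synaptic inputs """
    m_inf = 0.5*(1.0 + np.tanh((V - beta_m)/gamma_m))   # fast gating activation
    w_inf = 0.5*(1.0 + np.tanh((V - beta_w)/gamma_w))   # slow gating activation
    tau_w = 1.0/np.cosh((V - beta_w)/(2.0*gamma_w))     # slow gating time scale
    V_dot = (
        I_ext
        - g_fast*m_inf*(V - E_fast)
        - g_slow*w*(V - E_slow)
        - g_leak*(V - E_leak)
    )/C
    w_dot = phi_w*(w_inf - w)/tau_w
    return V_dot, w_dot


# Forward-Euler rollout of a single, uncoupled neuron
V, w, dt = 0.0, 0.0, 1e-3
for _ in range(10000):
    V_dot, w_dot = morris_lecar_rhs(V, w)
    V, w = V + dt*V_dot, w + dt*w_dot
print(V, w)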
------------------------------------------------------------------------ - -""" - -import sys - -import numpy as np -from networkx import DiGraph -from scipy.integrate import ode - -from farms_network.network_generator import NetworkGenerator -from farms_network.integrators import c_rk4 - -from .networkx_model import NetworkXModel - -if not sys.warnoptions: - import warnings - warnings.simplefilter("ignore", UserWarning) - - -class NeuralSystem(NetworkXModel): - """Neural System. - """ - - def __init__(self, network_graph, container): - """ Initialize neural system. """ - super(NeuralSystem, self).__init__() - self.container = container - # Add name-space for neural system data - neural_table = self.container.add_namespace('neural') - # self.config_path = config_path - self.integrator = None - if isinstance(network_graph, str): - self.read_graph(network_graph) - elif isinstance(network_graph, DiGraph): - self.graph = network_graph - # Create network - self.network = NetworkGenerator(self.graph, neural_table) - self.time = None - self.state = None - - def setup_integrator( - self, x0=None, integrator=u'dopri5', atol=1e-12, rtol=1e-6, - max_step=0.0, method=u'adams' - ): - """Setup system.""" - self.integrator = ode(self.network.ode).set_integrator( - integrator, - method=method, - atol=atol, - rtol=rtol, - max_step=max_step, - # nsteps=nsteps - ) - - if x0 is None: - # initial_values = np.random.rand( - # self.container.neural.states.values - # ) - self.integrator.set_initial_value( - self.container.neural.states.values, 0.0 - ) - else: - self.integrator.set_initial_value(x0, 0.0) - self.state = self.integrator.y - self.time = 0.0 - - def euler(self, time, state, func, step_size=1e-3): - """ Euler integrator """ - new_state = state + step_size*np.array(func(time, state)) - return new_state - - def rk4(self, time, state, func, step_size=1e-3, n_substeps=1): - """ Runge-kutta order 4 integrator """ - step_size = step_size/float(n_substeps) - for j in range(n_substeps): - K1 = np.array(func(time, state)) - K2 = np.array(func(time + step_size/2, state + (step_size/2 * K1))) - K3 = np.array(func(time + step_size/2, state + (step_size/2 * K2))) - K4 = np.array(func(time + step_size, state + (step_size * K3))) - state = state + (K1 + 2*K2 + 2*K3 + K4)*(step_size/6) - time += step_size - return state - - def rk5(self, time, state, func, step_size=1e-3): - """ Runge-kutta order 5 integrator """ - K1 = np.array(func(time, state)) - K2 = np.array(func(time + step_size/4.0, state + (step_size/4.0 * K1))) - K3 = np.array(func(time + step_size/4.0, state + (step_size/8.0)*(K1 + K2))) - K4 = np.array(func(time + step_size/2.0, state - (step_size/2.0 * K2) + (step_size * K3))) - K5 = np.array(func(time + 3*step_size/4.0, state + (step_size/16.0)*(3*K1 + 9*K4))) - K6 = np.array(func(time + step_size, state + (step_size/7.0)*(-3*K1 + 2*K2 + 12*K3 + -12*K4 + 8*K5))) - new_state = np.array(state) + (7/90*K1 + 32/90*K3 + 12/90*K4 + 32/90*K5 + 7/90*K6)*(step_size) - return new_state - - def step(self, dt=1, update=True): - """Step ode system. 
""" - self.time += dt - self.state = self.rk4( - self.time, self.state, self.network.ode, step_size=dt, n_substeps=2 - ) - # self.state = c_rk4( - # self.time, self.state, self.network.ode, step_size=dt - # ) - # self.integrator.set_initial_value(self.integrator.y, - # self.integrator.t) - # self.integrator.integrate(self.integrator.t+dt) diff --git a/farms_network/neuron.pyx b/farms_network/neuron.pyx deleted file mode 100644 index acb4a4f..0000000 --- a/farms_network/neuron.pyx +++ /dev/null @@ -1,85 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ -""" -import farms_pylog as pylog -pylog.set_level('error') - - -cdef class Neuron: - """Base neuron class. - """ - - def __init__(self, model_type): - super(Neuron, self).__init__() - self._model_type = model_type # : Type of neuron @property - - def add_ode_input(self, neuron, neural_container, **kwargs): - """Add relevant external inputs to the ode. - Parameters - ---------- - neuron : - Neuron model from which the input is received. - kwargs : - Contains the weight/synaptic information from the receiving neuron. - """ - pylog.error( - 'add_ode_input : Method not implemented in Neuron child class') - raise NotImplementedError() - - def ode_rhs(self, y, w, p): - """ ODE RHS. - Returns - ---------- - ode_rhs: - List containing the rhs equations of the ode states in the system - """ - pylog.error('ode_rhs : Method not implemented in Neuron child class') - raise NotImplementedError() - - def output(self): - """ Output of the neuron model. - Returns - ---------- - out: - Output of the neuron model - """ - pylog.error('output : Method not implemented in Neuron child class') - raise NotImplementedError() - - #################### PROPERTIES #################### - @property - def model_type(self): - """Neuron type. """ - return self._model_type - - @model_type.setter - def model_type(self, value): - """ - Parameters - ---------- - value : - Type of neuron model - """ - self._model_type = value - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - pass - - cdef void c_output(self): - pass diff --git a/farms_network/neuron_factory.py b/farms_network/neuron_factory.py deleted file mode 100644 index 9ec08a5..0000000 --- a/farms_network/neuron_factory.py +++ /dev/null @@ -1,97 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Factory class for generating the neuron model. -""" - -from farms_network.fitzhugh_nagumo import FitzhughNagumo -from farms_network.hh_daun_motorneuron import HHDaunMotorneuron -from farms_network.hopf_oscillator import HopfOscillator -from farms_network.leaky_integrator import LeakyIntegrator -from farms_network.lif_danner import LIFDanner -from farms_network.lif_danner_nap import LIFDannerNap -from farms_network.lif_daun_interneuron import LIFDaunInterneuron -from farms_network.matsuoka_neuron import MatsuokaNeuron -from farms_network.morphed_oscillator import MorphedOscillator -from farms_network.morris_lecar import MorrisLecarNeuron -from farms_network.oscillator import Oscillator -from farms_network.sensory_neuron import SensoryNeuron -from farms_network.relu import ReLUNeuron - - -class NeuronFactory(object): - """Implementation of Factory Neuron class. - """ - neurons = { # 'if': IntegrateAndFire, - 'oscillator': Oscillator, - 'hopf_oscillator': HopfOscillator, - 'morphed_oscillator': MorphedOscillator, - 'leaky': LeakyIntegrator, - 'sensory': SensoryNeuron, - 'lif_danner_nap': LIFDannerNap, - 'lif_danner': LIFDanner, - 'lif_daun_interneuron': LIFDaunInterneuron, - 'hh_daun_motorneuron': HHDaunMotorneuron, - 'fitzhugh_nagumo': FitzhughNagumo, - 'matsuoka_neuron': MatsuokaNeuron, - 'morris_lecar': MorrisLecarNeuron, - 'relu': ReLUNeuron, - } - - def __init__(self): - """Factory initialization.""" - super(NeuronFactory, self).__init__() - - @staticmethod - def register_neuron(neuron_type, neuron_instance): - """ - Register a new type of neuron that is a child class of Neuron. - Parameters - ---------- - self: type - description - neuron_type: - String to identifier for the neuron. - neuron_instance: - Class of the neuron to register. - """ - NeuronFactory.neurons[neuron_type] = neuron_instance - - @staticmethod - def gen_neuron(neuron_type): - """Generate the necessary type of neuron. - Parameters - ---------- - self: type - description - neuron_type: - One of the following list of available neurons. - 1. if - Integrate and Fire - 2. lif_danner_nap - LIF Danner Nap - 3. lif_danner - LIF Danner - 4. lif_daun_interneuron - LIF Daun Interneuron - 5. hh_daun_motorneuron - HH_Daun_Motorneuron - Returns - ------- - neuron: - Appropriate neuron class. 
- """ - neuron = NeuronFactory.neurons.get(neuron_type) - if not neuron: - raise ValueError(neuron_type) - return neuron diff --git a/farms_network/noise/__init__.py b/farms_network/noise/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/noise/ornstein_uhlenbeck.py b/farms_network/noise/ornstein_uhlenbeck.py new file mode 100644 index 0000000..278e88d --- /dev/null +++ b/farms_network/noise/ornstein_uhlenbeck.py @@ -0,0 +1,21 @@ +from .ornstein_uhlenbeck_cy import OrnsteinUhlenbeckCy + +from ..core.options import NetworkOptions + + +class OrnsteinUhlenbeck: + """ OrnsteinUhlenbeck Noise Model """ + + def __init__(self, network_options: NetworkOptions): + """ Init """ + self.noise_options = [ + node.noise + for node in network_options.nodes + if node.noise + if node.noise.is_stochastic + ] + + self.n_dim = len(self.noise_options) + self.timestep = network_options.integration.timestep + self.seed = network_options.random_seed + self._ou_cy = OrnsteinUhlenbeckCy(self.noise_options, self.seed) diff --git a/farms_network/noise/ornstein_uhlenbeck_cy.pxd b/farms_network/noise/ornstein_uhlenbeck_cy.pxd new file mode 100644 index 0000000..56d8a73 --- /dev/null +++ b/farms_network/noise/ornstein_uhlenbeck_cy.pxd @@ -0,0 +1,60 @@ +# distutils: language = c++ + +from libc.math cimport sqrt as csqrt +from libc.stdint cimport uint_fast32_t, uint_fast64_t + +from ..numeric.system_cy cimport SDESystem + + +cdef extern from "" namespace "std" nogil: + cdef cppclass random_device: + ctypedef uint_fast32_t result_type + random_device() + result_type operator()() + + cdef cppclass mt19937: + ctypedef uint_fast32_t result_type + mt19937() + mt19937(result_type seed) + result_type operator()() + result_type min() + result_type max() + void discard(size_t z) + void seed(result_type seed) + + cdef cppclass mt19937_64: + ctypedef uint_fast64_t result_type + + mt19937_64() + mt19937_64(result_type seed) + result_type operator()() + result_type min() + result_type max() + void discard(size_t z) + void seed(result_type seed) + + cdef cppclass normal_distribution[T]: + ctypedef T result_type + normal_distribution() + normal_distribution(result_type, result_type) + result_type operator()[Generator](Generator&) + result_type min() + result_type max() + + +cdef struct ornstein_uhlenbeck_params_t: + double mu + double sigma + double tau + + +cdef class OrnsteinUhlenbeckCy(SDESystem): + + cdef: + int n_dim + ornstein_uhlenbeck_params_t* params + mt19937_64 random_generator + normal_distribution[double] distribution + + cdef void evaluate_a(self, double time, double[:] states, double[:] drift) noexcept + cdef void evaluate_b(self, double time, double[:] states, double[:] diffusion) noexcept diff --git a/farms_network/noise/ornstein_uhlenbeck_cy.pyx b/farms_network/noise/ornstein_uhlenbeck_cy.pyx new file mode 100644 index 0000000..71cd5ed --- /dev/null +++ b/farms_network/noise/ornstein_uhlenbeck_cy.pyx @@ -0,0 +1,108 @@ + # distutils: language = c++ + +""" +----------------------------------------------------------------------- +Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty +Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
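The OrnsteinUhlenbeckCy declaration added here models each stochastic node as an Ornstein-Uhlenbeck process, dX = (mu - X)/tau dt + sigma*sqrt(2/tau) dW, which the new EulerMaruyamaSolver further below advances as X += a*dt + sqrt(dt)*b*xi with xi drawn from N(0, 1). A pure-NumPy sketch of that update, with illustrative mu, sigma and tau values (for this parameterization the stationary standard deviation is sigma):

import numpy as np


def ou_euler_maruyama(n_steps, dt, mu=0.0, sigma=1.0, tau=0.1, x0=0.0, seed=0):
    """ Simulate one Ornstein-Uhlenbeck channel with the Euler-Maruyama scheme """
    rng = np.random.default_rng(seed)
    x = np.empty(n_steps + 1)
    x[0] = x0
    for k in range(n_steps):
        drift = (mu - x[k])/tau                                    # a(X, t)
        diffusion = sigma*np.sqrt(2.0/tau)*rng.standard_normal()   # b(X, t)*xi
        x[k + 1] = x[k] + drift*dt + np.sqrt(dt)*diffusion
    return x


trace = ou_euler_maruyama(n_steps=100000, dt=1e-3)
print(trace.mean(), trace.std())   # approaches mu and sigma respectively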
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----------------------------------------------------------------------- +""" + + +from libc.math cimport sqrt as csqrt +from libc.stdio cimport printf +from libc.stdlib cimport free, malloc + +from .ornstein_uhlenbeck_cy cimport mt19937, mt19937_64, normal_distribution + +from typing import List + +import numpy as np + +from ..core.options import OrnsteinUhlenbeckOptions + + +cdef class OrnsteinUhlenbeckCy(SDESystem): + """ Ornstein Uhlenheck parameters """ + + def __cinit__( + self, + noise_options: List[OrnsteinUhlenbeckOptions], + random_seed: int + ): + """ C initialization for manual memory allocation """ + + self.params = malloc( + len(noise_options) * sizeof(ornstein_uhlenbeck_params_t) + ) + + if self.params is NULL: + raise MemoryError( + "Failed to allocate memory for OrnsteinUhlenbeck Parameters" + ) + + def __dealloc__(self): + """ Deallocate any manual memory as part of clean up """ + if self.params is not NULL: + free(self.params) + + def __init__( + self, + noise_options: List[OrnsteinUhlenbeckOptions], + random_seed: int + ): + super().__init__() + self.n_dim = len(noise_options) + self.initialize_parameters_from_options(noise_options, random_seed) + + cdef void evaluate_a(self, double time, double[:] states, double[:] drift) noexcept: + cdef unsigned int j + cdef ornstein_uhlenbeck_params_t param + + for j in range(self.n_dim): + param = self.params[j] + drift[j] = (param.mu - states[j])/param.tau + + cdef void evaluate_b(self, double time, double[:] states, double[:] diffusion) noexcept: + cdef unsigned int j + cdef ornstein_uhlenbeck_params_t param + + for j in range(self.n_dim): + param = self.params[j] + diffusion[j] = param.sigma*( + csqrt(2.0/param.tau)*(self.distribution(self.random_generator)) + ) + + def py_evaluate_a(self, time, states, drift): + self.evaluate_a(time, states, drift) + return drift + + def py_evaluate_b(self, time, states, diffusion): + self.evaluate_b(time, states, diffusion) + return diffusion + + def initialize_parameters_from_options(self, noise_options, random_seed): + """ Initialize the parameters from noise options + + # TODO: Remove default random seed in code + """ + for index in range(self.n_dim): + noise_option = noise_options[index] + self.params[index].mu = noise_option.mu + self.params[index].sigma = noise_option.sigma + self.params[index].tau = noise_option.tau + + self.random_generator = mt19937_64(random_seed) + # The distribution should always be mean=0.0 and std=1.0 + self.distribution = normal_distribution[double](0.0, 1.0) diff --git a/farms_network/numeric/__init__.py b/farms_network/numeric/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/numeric/integrators.py b/farms_network/numeric/integrators.py new file mode 100644 index 0000000..23bbe67 --- /dev/null +++ b/farms_network/numeric/integrators.py @@ -0,0 +1,9 @@ +""" Integrators """ + + +class RK4: + """ RK4 Integrator """ + + def __init__(self, system, integration_options): + " Integration " + self._rk4_integrator = None diff --git a/farms_network/numeric/integrators_cy.pxd b/farms_network/numeric/integrators_cy.pxd new file mode 100644 index 
0000000..bb94b4b --- /dev/null +++ b/farms_network/numeric/integrators_cy.pxd @@ -0,0 +1,33 @@ +from farms_core.array.array_cy cimport DoubleArray1D +from libc.math cimport sqrt as csqrt + +from .system_cy cimport ODESystem, SDESystem + +include 'types.pxd' + + +cdef class RK4Solver: + cdef: + DoubleArray1D k1 + DoubleArray1D k2 + DoubleArray1D k3 + DoubleArray1D k4 + DoubleArray1D states_tmp + + unsigned int dim + double dt + + cdef void _step(self, ODESystem sys, double time, double[:] state) noexcept + + +cdef class EulerMaruyamaSolver: + + cdef: + DoubleArray1D drift + DoubleArray1D diffusion + + unsigned int dim + double dt + + cdef: + cdef void step(self, SDESystem sys, double time, double[:] state) noexcept diff --git a/farms_network/numeric/integrators_cy.pyx b/farms_network/numeric/integrators_cy.pyx new file mode 100644 index 0000000..0c9434e --- /dev/null +++ b/farms_network/numeric/integrators_cy.pyx @@ -0,0 +1,95 @@ +import numpy as np + +from ..core.options import IntegrationOptions + +from libc.stdio cimport printf + +NPDTYPE = np.float64 + + +cdef class RK4Solver: + + def __init__ (self, unsigned int dim, double dt): + + super().__init__() + self.dim = dim + self.dt = dt + self.k1 = DoubleArray1D( + array=np.full(shape=dim, fill_value=0.0, dtype=NPDTYPE,) + ) + self.k2 = DoubleArray1D( + array=np.full(shape=dim, fill_value=0.0, dtype=NPDTYPE,) + ) + self.k3 = DoubleArray1D( + array=np.full(shape=dim, fill_value=0.0, dtype=NPDTYPE,) + ) + self.k4 = DoubleArray1D( + array=np.full(shape=dim, fill_value=0.0, dtype=NPDTYPE,) + ) + self.states_tmp = DoubleArray1D( + array=np.full(shape=dim, fill_value=0.0, dtype=NPDTYPE,) + ) + + cdef void _step(self, ODESystem sys, double time, double[:] states) noexcept: + cdef unsigned int i + cdef double dt2 = self.dt / 2.0 + cdef double dt6 = self.dt / 6.0 + cdef double[:] k1 = self.k1.array + cdef double[:] k2 = self.k2.array + cdef double[:] k3 = self.k3.array + cdef double[:] k4 = self.k4.array + cdef double[:] states_tmp = self.states_tmp.array + + # Compute k1 + sys.evaluate(time, states, k1) + + # Compute k2 + for i in range(self.dim): + states_tmp[i] = states[i] + (dt2 * k1[i]) + sys.evaluate(time + dt2, states_tmp, k2) + + # Compute k3 + for i in range(self.dim): + states_tmp[i] = states[i] + (dt2 * k2[i]) + sys.evaluate(time + dt2, states_tmp, k3) + + # Compute k4 + for i in range(self.dim): + states_tmp[i] = states[i] + self.dt * k3[i] + sys.evaluate(time + self.dt, states_tmp, k4) + + # Update y: y = y + (k1 + 2*k2 + 2*k3 + k4) / 6 + for i in range(self.dim): + states[i] = states[i] + dt6 * (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]) + + def step(self, ODESystem sys, double time, double[:] states): + self._step(sys, time, states) + + +cdef class EulerMaruyamaSolver: + + def __init__ (self, unsigned int dim, double dt): + + super().__init__() + self.dim = dim + self.dt = dt + self.drift = DoubleArray1D( + array=np.full(shape=self.dim, fill_value=0.0, dtype=NPDTYPE,) + ) + self.diffusion = DoubleArray1D( + array=np.full(shape=self.dim, fill_value=0.0, dtype=NPDTYPE,) + ) + + cdef void step(self, SDESystem sys, double time, double[:] state) noexcept: + """ Update stochastic noise process with Euler–Maruyama method (also called the + Euler method) is a method for the approximate numerical solution of a stochastic + differential equation (SDE) """ + + cdef unsigned int i + cdef double[:] drift = self.drift.array + cdef double[:] diffusion = self.diffusion.array + + sys.evaluate_a(time, state, drift) + sys.evaluate_b(time, state, 
diffusion) + for i in range(self.dim): + state[i] += drift[i]*self.dt + csqrt(self.dt)*diffusion[i] diff --git a/farms_network/numeric/system.py b/farms_network/numeric/system.py new file mode 100644 index 0000000..e69de29 diff --git a/farms_network/numeric/system_cy.pxd b/farms_network/numeric/system_cy.pxd new file mode 100644 index 0000000..a6d3306 --- /dev/null +++ b/farms_network/numeric/system_cy.pxd @@ -0,0 +1,9 @@ +cdef class ODESystem: + + cdef void evaluate(self, double time, double[:] states, double[:] derivatives) noexcept + + +cdef class SDESystem: + + cdef void evaluate_a(self, double time, double[:] states, double[:] drift) noexcept + cdef void evaluate_b(self, double time, double[:] states, double[:] diffusion) noexcept diff --git a/farms_network/numeric/system_cy.pyx b/farms_network/numeric/system_cy.pyx new file mode 100644 index 0000000..8a01b2a --- /dev/null +++ b/farms_network/numeric/system_cy.pyx @@ -0,0 +1,29 @@ +""" Template for an ODE system """ + + +cdef class ODESystem: + """ ODE System """ + + def __init__(self): + """ Initialize """ + ... + + cdef void evaluate(self, double time, double[:] states, double[:] derivatives) noexcept: + """ Evaluate that needs to filled out by an ODE system """ + ... + + +cdef class SDESystem: + """ SDE system of the form: dXt = a(Xt,t) dt + b(Xt,t) dW,""" + + def __init__(self): + """ Initialize """ + ... + + cdef void evaluate_a(self, double time, double[:] states, double[:] drift) noexcept: + """ a(Xt,t) """ + ... + + cdef void evaluate_b(self, double time, double[:] states, double[:] diffusion) noexcept: + """ b(Xt,t) """ + ... diff --git a/farms_network/oscillator.pxd b/farms_network/oscillator.pxd deleted file mode 100644 index 9729036..0000000 --- a/farms_network/oscillator.pxd +++ /dev/null @@ -1,64 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Oscillator model. 
-""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - -cdef struct OscillatorNeuronInput: - int neuron_idx - int weight_idx - int phi_idx - -cdef class Oscillator(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # parameters - # constants - Parameter f - Parameter R - Parameter a - - # states - Parameter phase - Parameter amp - - # inputs - Parameter ext_in - - # ode - Parameter phase_dot - Parameter amp_dot - - # Ouputs - Parameter nout - - # neuron connenctions - OscillatorNeuronInput[:] neuron_inputs - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _phase, double _amp) diff --git a/farms_network/oscillator.pyx b/farms_network/oscillator.pyx deleted file mode 100644 index 93402fe..0000000 --- a/farms_network/oscillator.pyx +++ /dev/null @@ -1,161 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Oscillator model -""" -from libc.stdio cimport printf -import farms_pylog as pylog -from libc.math cimport exp -from libc.math cimport M_PI -from libc.math cimport sin as csin -import numpy as np -cimport numpy as cnp - - -cdef class Oscillator(Neuron): - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super(Oscillator, self).__init__('leaky') - - # Neuron ID - self.n_id = n_id - # Initialize parameters - self.f = neural_container.parameters.add_parameter( - 'freq_' + self.n_id, kwargs.get('f', 0.1))[0] - - self.R = neural_container.parameters.add_parameter( - 'R_' + self.n_id, kwargs.get('R', 0.1))[0] - - self.a = neural_container.parameters.add_parameter( - 'a_' + self.n_id, kwargs.get('a', 0.1))[0] - - # Initialize states - self.phase = neural_container.states.add_parameter( - 'phase_' + self.n_id, kwargs.get('phase0', 0.0))[0] - self.amp = neural_container.states.add_parameter( - 'amp_' + self.n_id, kwargs.get('amp0', 0.0))[0] - - # External inputs - self.ext_in = neural_container.inputs.add_parameter( - 'ext_in_' + self.n_id)[0] - - # ODE RHS - self.phase_dot = neural_container.dstates.add_parameter( - 'phase_dot_' + self.n_id, 0.0)[0] - self.amp_dot = neural_container.dstates.add_parameter( - 'amp_dot_' + self.n_id, 0.0)[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i'), - ('phi_idx', 'i')]) - - self.num_inputs = num_inputs - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the ode.""" - # Create a struct to store the inputs and weights to the neuron - cdef OscillatorNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('weight', 0.0))[0] - phi = neural_container.parameters.add_parameter( - 'phi_' + neuron.n_id + '_to_' + self.n_id, - kwargs.get('phi', 0.0))[0] - - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - phi_idx = neural_container.parameters.get_parameter_index( - 'phi_' + neuron.n_id + '_to_' + self.n_id) - - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - n.phi_idx = phi_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def output(self): - """Neuron activation function. - Parameters - ---------- - m_potential: float - Neuron membrane potential - """ - return self.c_output() - - def ode_rhs(self, y, w, p): - """ Python interface to the ode_rhs computation.""" - self.c_ode_rhs(y, w, p) - - #################### C-FUNCTIONS #################### - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # Current state - cdef double _phase = self.phase.c_get_value() - cdef double _amp = self.amp.c_get_value() - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - cdef double _phi - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _phi = _p[self.neuron_inputs[j].phi_idx] - _sum += self.c_neuron_inputs_eval(_neuron_out, - _weight, _phi, _phase, _amp) - - # phidot : phase_dot - self.phase_dot.c_set_value(2*M_PI*self.f.c_get_value() + _sum) - - # ampdot - self.amp_dot.c_set_value( - self.a.c_get_value()*(self.R.c_get_value() - _amp) - ) - - cdef void c_output(self): - """ Neuron output. 
""" - self.nout.c_set_value(self.phase.c_get_value()) - - cdef double c_neuron_inputs_eval( - self, double _neuron_out, double _weight, double _phi, - double _phase, double _amp): - """ Evaluate neuron inputs.""" - return _weight*_amp*csin(_neuron_out - _phase - _phi) diff --git a/farms_network/relu.pxd b/farms_network/relu.pxd deleted file mode 100644 index 027101c..0000000 --- a/farms_network/relu.pxd +++ /dev/null @@ -1,53 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Rectified Linear Unit (ReLU) neurons. -""" - -from farms_container.parameter cimport Parameter -from farms_network.neuron cimport Neuron - - -cdef struct ReLUNeuronInput: - int neuron_idx - int weight_idx - - -cdef class ReLUNeuron(Neuron): - cdef: - readonly str n_id - - unsigned int num_inputs - - # Parameters - Parameter gain - Parameter sign - Parameter offset - - # Input from external system - Parameter ext_inp - - # neuron connenctions - ReLUNeuronInput[:] neuron_inputs - - # Ouputs - Parameter nout - - cdef: - void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p) - void c_output(self) diff --git a/farms_network/relu.pyx b/farms_network/relu.pyx deleted file mode 100644 index 0f67c5d..0000000 --- a/farms_network/relu.pyx +++ /dev/null @@ -1,132 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Sensory afferent neurons. -""" - -cimport numpy as cnp - -from farms_network.neuron import Neuron -from libc.stdio cimport printf - -cdef class ReLUNeuron(Neuron): - """ Rectified Linear Unit neurons connecting """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. 
- """ - super().__init__('relu') - - # Neuron ID - self.n_id = n_id - - # Initialize parameters - self.gain = neural_container.parameters.add_parameter( - 'gain_' + self.n_id, kwargs.get('gain', 1.0))[0] - - self.sign = neural_container.parameters.add_parameter( - 'sign_' + self.n_id, kwargs.get('sign', 1.0))[0] - - # assert abs(self.sign.value) != 1.0, "ReLU sign parameter should be 1.0" - - self.offset = neural_container.parameters.add_parameter( - 'offset_' + self.n_id, kwargs.get('offset', 0.0))[0] - - self.ext_inp = neural_container.inputs.add_parameter( - 'ext_' + self.n_id, kwargs.get('init', 0.0))[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - # Neuron inputs - self.num_inputs = num_inputs - self.neuron_inputs = cnp.ndarray((num_inputs,), - dtype=[('neuron_idx', 'i'), - ('weight_idx', 'i')]) - - def reset_sensory_param(self, param): - """ Add the sensory input. """ - self.aff_inp = param - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """ Add relevant external inputs to the output. - Parameters - ---------- - """ - - # Create a struct to store the inputs and weights to the neuron - cdef ReLUNeuronInput n - # Get the neuron parameter - neuron_idx = neural_container.outputs.get_parameter_index( - 'nout_'+neuron.n_id) - - # Add the weight parameter - weight = neural_container.weights.add_parameter( - 'w_' + neuron.n_id + '_to_' + self.n_id, kwargs.get('weight', 0.0))[0] - weight_idx = neural_container.weights.get_parameter_index( - 'w_' + neuron.n_id + '_to_' + self.n_id) - n.neuron_idx = neuron_idx - n.weight_idx = weight_idx - - # Append the struct to the list - self.neuron_inputs[idx] = n - - def ode_rhs(self, y, w, p): - """Abstract method""" - self.c_ode_rhs(y, w, p) - - def output(self): - """ Output of the neuron model. - Returns - ---------- - out: - Output of the neuron model - """ - return self.c_output() - - #################### C-FUNCTIONS #################### - - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - - # Neuron inputs - cdef double _sum = 0.0 - cdef unsigned int j - cdef double _neuron_out - cdef double _weight - - for j in range(self.num_inputs): - _neuron_out = _y[self.neuron_inputs[j].neuron_idx] - _weight = _w[self.neuron_inputs[j].weight_idx] - _sum += (_neuron_out*_weight) - self.ext_inp.c_set_value(_sum) - - cdef void c_output(self): - """ Neuron output. """ - # Set the neuron output - cdef double gain = self.gain.c_get_value() - cdef double sign = self.sign.c_get_value() - cdef double offset = self.offset.c_get_value() - cdef double ext_in = self.ext_inp.c_get_value() - cdef double res = gain*(sign*ext_in + offset) - self.nout.c_set_value(max(0.0, res)) diff --git a/farms_network/sensory_neuron.pyx b/farms_network/sensory_neuron.pyx deleted file mode 100644 index 315d970..0000000 --- a/farms_network/sensory_neuron.pyx +++ /dev/null @@ -1,78 +0,0 @@ -""" ------------------------------------------------------------------------ -Copyright 2018-2020 Jonathan Arreguit, Shravan Tata Ramalingasetty -Copyright 2018 BioRobotics Laboratory, École polytechnique fédérale de Lausanne - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
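The deleted Oscillator above implements an amplitude-controlled phase oscillator: phase_dot = 2*pi*f + sum_j w_ij * r_i * sin(theta_j - theta_i - phi_ij) and amp_dot = a*(R - r_i), with the coupling term scaled by the receiving oscillator's own amplitude, as in c_neuron_inputs_eval. A minimal NumPy sketch of two such oscillators pulled into antiphase by a pi phase bias (weights, phase biases and step count are illustrative):

import numpy as np


def oscillator_rhs(phase, amp, f, R, a, weights, phi):
    """ Phase/amplitude dynamics mirroring the deleted Oscillator.c_ode_rhs """
    n = len(phase)
    phase_dot = 2.0*np.pi*f
    for i in range(n):
        for j in range(n):
            # Coupling scaled by the receiving oscillator's own amplitude
            phase_dot[i] += weights[i, j]*amp[i]*np.sin(
                phase[j] - phase[i] - phi[i, j]
            )
    amp_dot = a*(R - amp)
    return phase_dot, amp_dot


f = np.array([1.0, 1.0])
R = np.array([1.0, 1.0])
a = np.array([10.0, 10.0])
weights = np.array([[0.0, 2.0], [2.0, 0.0]])
phi = np.array([[0.0, np.pi], [np.pi, 0.0]])

phase, amp, dt = np.array([0.0, 0.1]), np.zeros(2), 1e-3
for _ in range(20000):
    phase_dot, amp_dot = oscillator_rhs(phase, amp, f, R, a, weights, phi)
    phase += dt*phase_dot
    amp += dt*amp_dot
print((phase[1] - phase[0]) % (2.0*np.pi))   # close to pi once phase-locked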
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------------------------------------------------------------------------ - -Sensory afferent neurons. -""" - -from farms_network.neuron import Neuron -from libc.stdio cimport printf - -cdef class SensoryNeuron(Neuron): - """Sensory afferent neurons connecting muscle model with the network. - """ - - def __init__(self, n_id, num_inputs, neural_container, **kwargs): - """Initialize. - Parameters - ---------- - n_id: str - Unique ID for the neuron in the network. - """ - super(SensoryNeuron, self).__init__('sensory') - - # Neuron ID - self.n_id = n_id - - self.aff_inp = neural_container.inputs.add_parameter( - 'aff_' + self.n_id, kwargs.get('init', 0.0))[0] - - # Output - self.nout = neural_container.outputs.add_parameter( - 'nout_' + self.n_id, 0.0)[0] - - def reset_sensory_param(self, param): - """ Add the sensory input. """ - self.aff_inp = param - - def add_ode_input(self, int idx, neuron, neural_container, **kwargs): - """Abstract method""" - pass - - def ode_rhs(self, y, w, p): - """Abstract method""" - self.c_ode_rhs(y, w, p) - - def output(self): - """ Output of the neuron model. - Returns - ---------- - out: - Output of the neuron model - """ - return self.c_output() - - #################### C-FUNCTIONS #################### - - cdef void c_ode_rhs(self, double[:] _y, double[:] _w, double[:] _p): - """ Compute the ODE. Internal Setup Function.""" - pass - - cdef void c_output(self): - """ Neuron output. 
""" - # Set the neuron output - self.nout.c_set_value(self.aff_inp.c_get_value()) diff --git a/farms_network/utils/run.py b/farms_network/utils/run.py new file mode 100644 index 0000000..d9f3c03 --- /dev/null +++ b/farms_network/utils/run.py @@ -0,0 +1,43 @@ +""" Run script """ + +from argparse import ArgumentParser + +from farms_core.io.yaml import read_yaml +from farms_network.core.network import PyNetwork +from farms_network.core.options import NetworkOptions +from tqdm import tqdm + + +def run_network(network_options): + + network = PyNetwork.from_options(network_options) + network.setup_integrator(network_options) + + # data.to_file("/tmp/sim.hdf5") + + # Integrate + N_ITERATIONS = network_options.integration.n_iterations + TIMESTEP = network_options.integration.timestep + + inputs_view = network.data.external_inputs.array + for iteration in tqdm(range(0, N_ITERATIONS), colour="green", ascii=" >="): + inputs_view[:] = (iteration / N_ITERATIONS) * 1.0 + network.step() + network.data.times.array[iteration] = iteration*TIMESTEP + + +def main(): + """ Main """ + + parser = ArgumentParser() + parser.add_argument( + "--config_path", "-c", dest="config_path", type=str, required=True + ) + clargs = parser.parse_args() + # run network + options = NetworkOptions.from_options(read_yaml(clargs.config_path)) + run_network(options) + + +if __name__ == '__main__': + main() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..7363e6d --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,59 @@ +[build-system] +requires = [ + "setuptools", + "Cython >= 0.15.1", + "numpy", +] +build-backend = "setuptools.build_meta" + +[project] +dynamic = ["version"] +name = "farms_network" +description = "Module to generate, develop and visualize neural networks" +readme = "README.md" +license = {file = "LICENSE"} +dependencies = [ + "tqdm", + "networkx", + "numpy" +] +classifiers = [ + "Development Status :: 3 - Beta", + # Indicate who your project is intended for + "Intended Audience :: Science/Research", + "ScieTopic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Artificial Life", + # Specify the Python versions you support here. 
+    "Programming Language :: Python :: 3.11",
+]
+
+[project.urls]
+Homepage = "https://farmsim.dev"
+Documentation = "https://farmsim.dev"
+Repository = "https://github.com/farmsim/farms_network.git"
+Issues = "https://github.com/farmsim/farms_network/issues"
+Changelog = "https://github.com/farmsim/farms_network/blob/main/CHANGELOG.md"
+
+[project.optional-dependencies]
+gui = [
+    "scipy",
+    "matplotlib",
+    "seaborn",
+    "PyQt5",
+    "networkx",
+    "pydot",
+]
+cli = ["rich",]
+
+[project.scripts]
+farms_network = "farms_network.utils.run:main"
+
+[tool.setuptools.package-data]
+farms_network = [
+    "*.pxd",
+    "core/*.pxd",
+    "models/*.pxd",
+    "numeric/*.pxd",
+    "noise/*.pxd"
+]
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..6282e8b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+git+https://github.com/farmsim/farms_core.git
diff --git a/scratch/profile_network.py b/scratch/profile_network.py
new file mode 100644
index 0000000..7d57c50
--- /dev/null
+++ b/scratch/profile_network.py
@@ -0,0 +1,17 @@
+""" Profile network implementation """
+
+
+import numpy as np
+from farms_core.utils.profile import profile
+from farms_network.core import network
+from farms_network.core.data import NetworkData, StatesArray
+from farms_network.models import li_danner
+
+nstates = 100
+niterations = int(100e3)
+states = StatesArray(np.empty((niterations, nstates)))
+
+data = NetworkData(nstates=100, states=states)
+
+net = network.PyNetwork(nnodes=100)
+profile(net.test, data)
diff --git a/scratch/test_data.py b/scratch/test_data.py
new file mode 100644
index 0000000..df94515
--- /dev/null
+++ b/scratch/test_data.py
@@ -0,0 +1,12 @@
+import numpy as np
+from farms_network.core.data import NetworkData, StatesArray
+
+nstates = 100
+niterations = 1000
+states = StatesArray(
+    np.empty((niterations, nstates))
+)
+
+data = NetworkData(nstates=100, states=states)
+
+print(data.states.array[0, 0])
diff --git a/scratch/test_gui.py b/scratch/test_gui.py
new file mode 100644
index 0000000..a6ad2e7
--- /dev/null
+++ b/scratch/test_gui.py
@@ -0,0 +1,804 @@
+import time
+from argparse import ArgumentParser
+
+import numpy as np
+from farms_core.io.yaml import read_yaml
+from farms_network.core.network import Network
+from farms_network.core.options import NetworkOptions
+from farms_network.gui.gui import NetworkGUI
+from imgui_bundle import imgui, imgui_ctx, implot
+from tqdm import tqdm
+
+
+# From farms_amphibious. To be replaced!
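The [project.scripts] entry above makes `farms_network -c <config.yaml>` dispatch to farms_network.utils.run:main shown earlier. A sketch of driving the same loop from Python; the YAML path is a placeholder, and the ramped external drive mirrors run_network():

from farms_core.io.yaml import read_yaml
from farms_network.core.network import PyNetwork
from farms_network.core.options import NetworkOptions

# "config/network.yaml" is a placeholder for any NetworkOptions-compatible YAML
options = NetworkOptions.from_options(read_yaml("config/network.yaml"))
network = PyNetwork.from_options(options)
network.setup_integrator(options)

n_iterations = options.integration.n_iterations
timestep = options.integration.timestep
inputs_view = network.data.external_inputs.array
for iteration in range(n_iterations):
    # Ramp the external drive from 0 to 1 over the run, as run_network() does
    inputs_view[:] = iteration/n_iterations
    network.step()
    network.data.times.array[iteration] = iteration*timestep

After installation, the same loop is what `farms_network --config_path config/network.yaml` executes. The functions that follow in test_gui.py (rotate, direction, connect_positions) are geometry helpers carried over from farms_amphibious for the GUI prototype.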
+def rotate(vector, theta): + """Rotate vector""" + cos_t, sin_t = np.cos(theta), np.sin(theta) + rotation = np.array(((cos_t, -sin_t), (sin_t, cos_t))) + return np.dot(rotation, vector) + + +def direction(vector1, vector2): + """Unit direction""" + return (vector2-vector1)/np.linalg.norm(vector2-vector1) + + +def connect_positions(source, destination, dir_shift, perp_shift): + """Connect positions""" + connection_direction = direction(source, destination) + connection_perp = rotate(connection_direction, 0.5*np.pi) + new_source = ( + source + + dir_shift*connection_direction + + perp_shift*connection_perp + ) + new_destination = ( + destination + - dir_shift*connection_direction + + perp_shift*connection_perp + ) + return new_source, new_destination + + +def compute_phases(times, data): + phases = (np.array(data) > 0.1).astype(np.int16) + # phases = np.logical_not(phases).astype(np.int16) + phases_xs = [] + phases_ys = [] + for j in range(len(data)): + phases_start = np.where(np.diff(phases[j, :], prepend=0) == 1.0)[0] + phases_ends = np.where(np.diff(phases[j, :], append=0) == -1.0)[0] + phases_xs.append(np.vstack( + (times[phases_start], times[phases_start], times[phases_ends], times[phases_ends]) + ).T) + phases_ys.append(np.ones(np.shape(phases_xs[j]))*j) + # if np.all(len(phases_start) > 3): + phases_ys[j][:, 1] += 1 + phases_ys[j][:, 2] += 1 + + return phases_xs, phases_ys + + +def add_plot(iteration, data): + """ """ + # times = data.times.array[iteration%1000:] + side = "right" + limb = "fore" + plot_names = [ + f"{side}_{limb}_RG_E", + f"{side}_{limb}_RG_F", + f"left_fore_RG_F", + f"right_hind_RG_F", + f"left_hind_RG_F", + f"{side}_{limb}_PF_FA", + f"{side}_{limb}_PF_EA", + f"{side}_{limb}_PF_FB", + f"{side}_{limb}_PF_EB", + f"{side}_{limb}_RG_F_DR", + ] + + plot_labels = [ + "RH_RG_E", + "RH_RG_F", + "LF_RG_F", + "RH_RG_F", + "LH_RG_F", + "RH_PF_FA", + "RH_PF_EA", + "RH_PF_FB", + "RH_PF_EB", + "RH_RG_F_DR", + ] + + nodes_names = [ + node.name + for node in data.nodes + ] + + plot_nodes = [ + nodes_names.index(name) + for name in plot_names + if name in nodes_names + ] + if not plot_nodes: + return + + outputs = np.vstack( + ( + *[ + data.nodes[plot_nodes[j]].output.array + for j in range(len(plot_nodes)) + ], + data.nodes[plot_nodes[-1]].external_input.array, + ) + ) + if iteration < 1000: + plot_data = np.array(outputs[:, :iteration]) + else: + plot_data = np.array(outputs[:, iteration-1000:iteration]) + # plot_data = np.vstack((outputs[iteration%1000:], outputs[:iteration%1000])) + + times = np.array((np.linspace(0.0, 1.0, 1000)*-1.0)[::-1]) + + phases_xs, phases_ys = compute_phases(times, plot_data[1:5, :]) + + # phases = (np.array(plot_data[0, :]) > 0.1).astype(np.int16) + # phases = np.logical_not(phases).astype(np.int16) + # phases_start = np.where(np.diff(phases, prepend=0) == 1.0)[0] + # phases_ends = np.where(np.diff(phases, append=0) == -1.0)[0] + # phases_xs = np.vstack( + # (times[phases_start], times[phases_start], times[phases_ends], times[phases_ends]) + # ).T + # phases_ys = np.ones(np.shape(phases_xs)) + # if len(phases_start) > 3: + # phases_ys[:, 1] += 1.0 + # phases_ys[:, 2] += 1.0 + + colors = { + "RF": imgui.IM_COL32(28, 107, 180, 255), + "LF": imgui.IM_COL32(23, 163, 74, 255), + "RH": imgui.IM_COL32(200, 38, 39, 255), + "LH": imgui.IM_COL32(255, 252, 212, 255), # imgui.IM_COL32(0, 0, 0, 255), + "right_fore_RG_F": imgui.IM_COL32(28, 107, 180, 255), + "left_fore_RG_F": imgui.IM_COL32(23, 163, 74, 255), + "right_hind_RG_F": imgui.IM_COL32(200, 38, 39, 255), + 
"left_hind_RG_F": imgui.IM_COL32(255, 252, 212, 255), # imgui.IM_COL32(0, 0, 0, 255), + } + with imgui_ctx.begin("States"): + if implot.begin_subplots( + "Network Activity", + 3, + 1, + imgui.ImVec2(-1, -1), + row_col_ratios=implot.SubplotsRowColRatios(row_ratios=[0.1, 0.8, 0.1], col_ratios=[1]) + ): + if implot.begin_plot(""): + flags = ( + implot.AxisFlags_.no_label | implot.AxisFlags_.no_tick_labels | implot.AxisFlags_.no_tick_marks + ) + implot.setup_axis(implot.ImAxis_.y1, "Drive") + implot.setup_axis(implot.ImAxis_.x1, flags=flags) + implot.setup_axis_links(implot.ImAxis_.x1, implot.BoxedValue(-1.0), implot.BoxedValue(0.0)) + implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits(implot.ImAxis_.y1, 0.0, 1.5) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.y1, 0.0, 1.5) + implot.plot_line("RG-F-Dr", times, plot_data[-1, :]) + implot.end_plot() + if implot.begin_plot(""): + implot.setup_axis(implot.ImAxis_.y1, "Activity") + implot.setup_axis( + implot.ImAxis_.x1, + flags=( + implot.AxisFlags_.no_tick_labels | + implot.AxisFlags_.no_tick_marks + ) + ) + implot.setup_axis_links(implot.ImAxis_.x1, implot.BoxedValue(-1.0), implot.BoxedValue(0.0)) + implot.setup_axis_limits(implot.ImAxis_.y1, -1*len(plot_names), 1.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -8.2, 1.0) + implot.setup_axis_ticks( + axis=implot.ImAxis_.y1, + v_min=-8.0, + v_max=0.0, + n_ticks=int(len(plot_names[:-1])), + labels=(plot_labels[:-1])[::-1], + keep_default=False + ) + for j in range(len(plot_nodes[:-1])): + if plot_names[j] in colors: + implot.push_style_color(implot.Col_.line, colors.get(plot_names[j])) + implot.plot_line(plot_names[j], times, plot_data[j, :] - j) + implot.pop_style_color() + else: + implot.plot_line(plot_names[j], times, plot_data[j, :] - j) + implot.end_plot() + if len(plot_nodes) > 7: + if implot.begin_plot("", flags=implot.Flags_.no_legend): + implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits(implot.ImAxis_.y1, 0.0, 4.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.y1, 0.0, 4.0) + implot.setup_axis(implot.ImAxis_.y1, flags=implot.AxisFlags_.invert) + implot.setup_axis_ticks( + axis=implot.ImAxis_.y1, + v_min=0.5, + v_max=3.5, + n_ticks=int(4), + labels=("RF", "LF", "RH", "LH"), + keep_default=False + ) + for j, limb in enumerate(("RF", "LF", "RH", "LH")): + # if len(phases_xs[j]) > 3: + implot.push_style_color( + implot.Col_.fill, + colors[limb] + ) + implot.plot_shaded( + limb, + phases_xs[j].flatten(), + phases_ys[j].flatten(), + yref=j + ) + implot.pop_style_color() + implot.end_plot() + implot.end_subplots() + + +def draw_muscle_activity(iteration, data, plot_nodes, plot_names, title): + + outputs = np.vstack( + [ + data.nodes[plot_nodes[j]].output.array + for j in range(len(plot_nodes)) + ] + ) + if iteration < 1000: + plot_data = np.array(outputs[:, :iteration]) + else: + plot_data = np.array(outputs[:, iteration-1000:iteration]) + + times = np.array((np.linspace(0.0, 1.0, 1000)*-1.0)[::-1]) + + with imgui_ctx.begin(title): + if implot.begin_plot("Muscle Activity", imgui.ImVec2(-1, -1)): + implot.setup_axis(implot.ImAxis_.x1, "Time") + implot.setup_axis(implot.ImAxis_.y1, "Activity") + implot.setup_axis_links(implot.ImAxis_.x1, implot.BoxedValue(-1.0), implot.BoxedValue(0.0)) + 
implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits(implot.ImAxis_.y1, -1*(len(plot_nodes)-1), 1.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_ticks( + axis=implot.ImAxis_.y1, + v_min=-1*(len(plot_nodes)-1), + v_max=0.0, + n_ticks=int(len(plot_names)), + labels=plot_names[::-1], + keep_default=False + ) + # implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -5.2, 1.0) + for j in range(len(plot_nodes)): + implot.plot_line(plot_names[j], times, plot_data[j, :] - j) + implot.end_plot() + + +def plot_hind_motor_activity(iteration, data, side="right"): + side = "left" + limb = "hind" + + muscle_names = [ + "bfa", + "ip", + "bfpst", + "rf", + "va", + "mg", + "sol", + "ta", + "ab", + "gm_dorsal", + "edl", + "fdl", + ] + + nodes_names = [ + node.name + for node in data.nodes + ] + + plot_nodes = [ + nodes_names.index(f"{side}_{limb}_{name}_Mn") + for name in muscle_names + if f"{side}_{limb}_{name}_Mn" in nodes_names + ] + draw_muscle_activity(iteration, data, plot_nodes, muscle_names, title="Hindlimb muscles") + + +def plot_fore_motor_activity(iteration, data, side="right"): + side = "right" + limb = "fore" + + muscle_names = [ + "spd", + "ssp", + "abd", + "add", + "tbl", + "tbo", + "bbs", + "bra", + "eip", + "fcu", + ] + + nodes_names = [ + node.name + for node in data.nodes + ] + + plot_nodes = [ + nodes_names.index(f"{side}_{limb}_{name}_Mn") + for name in muscle_names + if f"{side}_{limb}_{name}_Mn" in nodes_names + ] + + draw_muscle_activity(iteration, data, plot_nodes, muscle_names, title="Forelimb muscles") + + +def __draw_muscle_activity(iteration, data): + """ Draw muscle activity """ + side = "left" + limb = "hind" + + muscle_names = [ + "bfa", + "ip", + "bfpst", + "rf", + "va", + "mg", + "sol", + "ta", + "ab", + "gm_dorsal", + "edl", + "fdl", + ] + + nodes_names = [ + node.name + for node in data.nodes + ] + + plot_nodes = [ + nodes_names.index(f"{side}_{limb}_{name}_Mn") + for name in muscle_names + ] + if not plot_nodes: + return + outputs = np.vstack( + [ + data.nodes[plot_nodes[j]].output + for j in range(len(plot_nodes)) + ] + ) + if iteration < 1000: + plot_data = np.array(outputs[:, :iteration]) + else: + plot_data = np.array(outputs[:, iteration-1000:iteration]) + + times = np.array((np.linspace(0.0, 1.0, 1000)*-1.0)[::-1]) + + with imgui_ctx.begin("Muscle activity"): + if implot.begin_plot("Muscle Activity", imgui.ImVec2(-1, -1)): + implot.setup_axis(implot.ImAxis_.x1, "Time") + implot.setup_axis(implot.ImAxis_.y1, "Activity") + implot.setup_axis_links(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0) + implot.setup_axis_limits(implot.ImAxis_.y1, -1*len(plot_nodes), 1.0) + implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0) + # implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -5.2, 1.0) + for j in range(len(plot_nodes)): + implot.plot_line(muscle_names[j], times, plot_data[j, :] - j) + implot.end_plot() + + # plot_nodes = [ + # nodes_names.index(f"{side}_{limb}_{name}_Rn") + # for name in muscle_names + # ] + # if not plot_nodes: + # return + # outputs = np.vstack( + # [ + # data.nodes[plot_nodes[j]].output + # for j in range(len(plot_nodes)) + # ] + # ) + # if iteration < 1000: + # plot_data = np.array(outputs[:, :iteration]) + # else: + # plot_data = np.array(outputs[:, iteration-1000:iteration]) + + # with imgui_ctx.begin("Renshaw activity"): + # if implot.begin_plot("Renshaw Activity", imgui.ImVec2(-1, -1)): + # 
implot.setup_axis(implot.ImAxis_.x1, "Time")
+    #         implot.setup_axis(implot.ImAxis_.y1, "Activity")
+    #         implot.setup_axis_links(implot.ImAxis_.x1, -1.0, 0.0)
+    #         implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0)
+    #         implot.setup_axis_limits(implot.ImAxis_.y1, -1*len(plot_nodes), 1.0)
+    #         implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0)
+    #         # implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -5.2, 1.0)
+    #         for j in range(len(plot_nodes)):
+    #             implot.plot_line(muscle_names[j], times, plot_data[j, :] - j)
+    #         implot.end_plot()
+
+    plot_nodes = [
+        nodes_names.index(name)
+        for name in nodes_names
+        if "Ib_In_e" in name
+    ]
+    plot_labels = [
+        name
+        for name in nodes_names
+        if "Ib_In_e" in name
+    ]
+    if not plot_nodes:
+        return
+    outputs = np.vstack(
+        [
+            data.nodes[plot_nodes[j]].output
+            for j in range(len(plot_nodes))
+        ]
+    )
+    if iteration < 1000:
+        plot_data = np.array(outputs[:, :iteration])
+    else:
+        plot_data = np.array(outputs[:, iteration-1000:iteration])
+
+    with imgui_ctx.begin("Sensory interneuron activity"):
+        if implot.begin_plot("Sensory interneuron Activity", imgui.ImVec2(-1, -1)):
+            implot.setup_axis(implot.ImAxis_.x1, "Time")
+            implot.setup_axis(implot.ImAxis_.y1, "Activity")
+            implot.setup_axis_links(implot.ImAxis_.x1, -1.0, 0.0)
+            implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0)
+            implot.setup_axis_limits(implot.ImAxis_.y1, -1*len(plot_nodes), 1.0)
+            implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0)
+            # implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -5.2, 1.0)
+            for j in range(len(plot_nodes)):
+                implot.plot_line(plot_labels[j], times, plot_data[j, :] - j)
+            implot.end_plot()
+
+
+def draw_vn_activity(iteration, data):
+    """ Draw vestibular neuron activity """
+    vn_names = [
+        f"{side}_{rate}_{axis}_{direction}_In_Vn"
+        for rate in ("position", "velocity")
+        for direction in ("clock", "cclock")
+        for axis in ("pitch", "roll")
+        for side in ("left", "right")
+    ]
+
+    nodes_names = [
+        node.name
+        for node in data.nodes
+    ]
+
+    plot_nodes = [
+        nodes_names.index(name)
+        for name in vn_names
+    ]
+    if not plot_nodes:
+        return
+    outputs = np.vstack(
+        [
+            data.nodes[plot_nodes[j]].output.array
+            for j in range(len(plot_nodes))
+        ]
+    )
+    if iteration < 1000:
+        plot_data = np.array(outputs[:, :iteration])
+    else:
+        plot_data = np.array(outputs[:, iteration-1000:iteration])
+
+    times = np.array((np.linspace(0.0, 1.0, 1000)*-1.0)[::-1])
+
+    with imgui_ctx.begin("Vestibular"):
+        if implot.begin_plot("Vestibular Activity", imgui.ImVec2(-1, -1)):
+            implot.setup_axis(implot.ImAxis_.x1, "Time")
+            implot.setup_axis(implot.ImAxis_.y1, "Activity")
+            implot.setup_axis_links(implot.ImAxis_.x1, -1.0, 0.0)
+            implot.setup_axis_limits(implot.ImAxis_.x1, -1.0, 0.0)
+            implot.setup_axis_limits(implot.ImAxis_.y1, -1*len(plot_nodes), 1.0)
+            implot.setup_axis_limits_constraints(implot.ImAxis_.x1, -1.0, 0.0)
+            # implot.setup_axis_limits_constraints(implot.ImAxis_.y1, -5.2, 1.0)
+            for j in range(len(plot_nodes)):
+                implot.plot_line(vn_names[j], times, plot_data[j, :] - j)
+            implot.end_plot()
+
+
+def draw_network(network_options, data, iteration, edges_x, edges_y):
+    """ Draw network """
+
+    nodes = network_options.nodes
+    edges = network_options.edges
+
+    with imgui_ctx.begin("Full-Network"):
+        flags = (
+            implot.AxisFlags_.no_label |
+            
implot.AxisFlags_.no_tick_labels | + implot.AxisFlags_.no_tick_marks + ) + if implot.begin_plot( + "vis", imgui.ImVec2((-1, -1)), implot.Flags_.equal + ): + implot.setup_axis(implot.ImAxis_.x1, flags=flags) + implot.setup_axis(implot.ImAxis_.y1, flags=flags) + implot.plot_line( + "", + xs=edges_x, + ys=edges_y, + flags=implot.LineFlags_.segments + ) + radius = 0.1 + circ_x = radius*np.cos(np.linspace(-np.pi, np.pi, 50)) + circ_y = radius*np.sin(np.linspace(-np.pi, np.pi, 50)) + for index, node in enumerate(nodes): + implot.set_next_marker_style( + size=10.0 # *node.visual.radius + ) + implot.push_style_var( + implot.StyleVar_.fill_alpha, + 0.05+data.nodes[index].output.array[iteration] + ) + implot.plot_scatter( + "##", + xs=np.array((node.visual.position[0],)), + ys=np.array((node.visual.position[1],)), + ) + # implot.plot_line( + # "##", + # node.visual.position[0]+circ_x, + # node.visual.position[1]+circ_y + # ) + implot.pop_style_var() + # implot.push_plot_clip_rect() + # position = implot.plot_to_pixels(implot.Point(node.visual.position[:2])) + # radius = implot.plot_to_pixels(0.001, 0.001) + # color = imgui.IM_COL32(255, 0, 0, 255) + # implot.get_plot_draw_list().add_circle(position, radius[0], color) + # implot.pop_plot_clip_rect() + + # implot.push_plot_clip_rect() + # color = imgui.IM_COL32( + # 100, 185, 0, + # int(255*(data.nodes[index].output[iteration])) + # ) + # implot.get_plot_draw_list().add_circle_filled(position, 7.5, color) + # implot.pop_plot_clip_rect() + implot.plot_text( + node.visual.label.replace("\\textsubscript", "")[0], + node.visual.position[0], + node.visual.position[1], + ) + + implot.end_plot() + + +def draw_slider( + label: str, + name: str, + values: list, + min_value: float = 0.0, + max_value: float = 1.0 +): + with imgui_ctx.begin(name): + clicked, values[0] = imgui.slider_float( + label="alpha", + v=values[0], + v_min=min_value, + v_max=max_value, + ) + clicked, values[1] = imgui.slider_float( + label="drive", + v=values[1], + v_min=min_value, + v_max=max_value, + ) + clicked, values[2] = imgui.slider_float( + label="Ia", + v=values[2], + v_min=min_value, + v_max=max_value, + ) + clicked, values[3] = imgui.slider_float( + label="II", + v=values[3], + v_min=min_value, + v_max=max_value, + ) + clicked, values[4] = imgui.slider_float( + label="Ib", + v=values[4], + v_min=min_value, + v_max=max_value, + ) + clicked, values[5] = imgui.slider_float( + label="Vn", + v=values[5], + v_min=-1.0, + v_max=max_value, + ) + clicked, values[6] = imgui.slider_float( + label="Cut", + v=values[6], + v_min=min_value, + v_max=max_value, + ) + return values + + +def draw_table(network_options, network_data): + """ Draw table """ + flags = ( + imgui.TableFlags_.borders | imgui.TableFlags_.row_bg | imgui.TableFlags_.resizable | \ + imgui.TableFlags_.sortable + ) + with imgui_ctx.begin("Table"): + edges = network_options.edges + nodes = network_options.nodes + n_edges = len(edges) + if imgui.begin_table("Edges", 3, flags): + weights = network_data.connectivity.weights + for col in ("Source", "Target", "Weight"): + imgui.table_setup_column(col) + imgui.table_headers_row() + for row in range(n_edges): + imgui.table_next_row() + imgui.table_set_column_index(0) + imgui.text(edges[row].source) + imgui.table_set_column_index(1) + imgui.text(edges[row].target) + imgui.table_set_column_index(2) + imgui.push_id(row) + _, weights[row] = imgui.input_float("##row", weights[row]) + imgui.pop_id() + imgui.end_table() + + +def draw_play_pause_button(button_state): + """ Draw button """ + + 
button_title = "Pause" if button_state else "Play" + with imgui_ctx.begin("Controls"): + if imgui.button(button_title): + button_state = not button_state + print(button_state) + return button_state + + +def main(): + """ Main """ + + parser = ArgumentParser() + parser.add_argument( + "--config_path", "-c", dest="config_path", type=str, required=True + ) + clargs = parser.parse_args() + # run network + network_options = NetworkOptions.from_options(read_yaml(clargs.config_path)) + + network = Network.from_options(network_options) + network.setup_integrator(network_options) + + # Integrate + N_ITERATIONS = network_options.integration.n_iterations + TIMESTEP = network_options.integration.timestep + BUFFER_SIZE = network_options.logs.buffer_size + + gui = NetworkGUI() + gui.create_context() + + inputs_view = network.data.external_inputs.array + drive_input = 0.0 + imgui.style_colors_dark() + implot.style_colors_dark() + + edges_xy = np.array( + [ + network_options.nodes[node_idx].visual.position[:2] + for edge in network_options.edges + for node_idx in ( + network_options.nodes.index(edge.source), + network_options.nodes.index(edge.target), + ) + ] + ) + # for index in range(len(edges_xy) - 1): + # edges_xy[index], edges_xy[index + 1] = connect_positions( + # edges_xy[index+1], edges_xy[index], 0.1, 0.0 + # ) + edges_x = np.array(edges_xy[:, 0]) + edges_y = np.array(edges_xy[:, 1]) + + fps = 30.0 + _time_draw = time.time() + _time_draw_last = _time_draw + _realtime = 0.1 + + io = imgui.get_io() + io.config_flags |= imgui.ConfigFlags_.docking_enable + imgui.get_style().anti_aliased_lines = True + imgui.get_style().anti_aliased_lines_use_tex = True + imgui.get_style().anti_aliased_fill = True + + + alpha_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "input" in node.name and node.model == "relay" + ] + drive_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "DR" in node.name and node.model == "linear" + ] + Ia_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "Ia" == node.name[-2:] + ] + II_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "II" == node.name[-2:] + ] + Ib_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "Ib" == node.name[-2:] + ] + Vn_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "Vn" == node.name[-2:] and node.model == "relay" + ] + Cut_input_indices = [ + index + for index, node in enumerate(network_options.nodes) + if "cut" == node.name[-3:] and node.model == "relay" + ] + slider_values = np.zeros((7,)) + slider_values[0] = 1.0 + input_array = np.zeros(np.shape(inputs_view)) + input_array[alpha_input_indices] = 1.0 + button_state = False + # input_array[drive_input_indices[0]] *= 1.05 + for index, node in enumerate(network_options.nodes): + if "BS_DR" in node.name: # and node.model == "linear" + bs_dr = index + + for iteration in tqdm(range(0, N_ITERATIONS), colour="green", ascii=" >="): + input_array[alpha_input_indices] = slider_values[0] + input_array[drive_input_indices] = slider_values[1] + input_array[Ia_input_indices] = slider_values[2] + input_array[II_input_indices] = slider_values[3] + input_array[Ib_input_indices] = slider_values[4] + input_array[Vn_input_indices] = slider_values[5] + input_array[Cut_input_indices] = slider_values[6] + + inputs_view[:] = input_array + network.step() + buffer_iteration = iteration%BUFFER_SIZE + 
network.data.times.array[buffer_iteration] = (iteration)*TIMESTEP + _time_draw_last = _time_draw + _time_draw = time.time() + fps = _realtime*1/(_time_draw-_time_draw_last)+(1-_realtime)*fps + implot.push_style_var(implot.StyleVar_.line_weight, 2.0) + if not (iteration % 2): + gui.new_frame() + slider_values = draw_slider(label="d", name="Drive", values=slider_values) + add_plot(buffer_iteration, network.data) + # button_state = draw_play_pause_button(button_state) + draw_table(network_options, network.data) + draw_network(network_options, network.data, buffer_iteration, edges_x, edges_y) + # plot_hind_motor_activity(buffer_iteration, network.data) + # plot_fore_motor_activity(buffer_iteration, network.data) + # draw_vn_activity(buffer_iteration, network.data) + gui.render_frame() + implot.pop_style_var() + + +if __name__ == '__main__': + main() diff --git a/scratch/test_network.py b/scratch/test_network.py new file mode 100644 index 0000000..69c8ffb --- /dev/null +++ b/scratch/test_network.py @@ -0,0 +1,212 @@ +""" Test network """ + +from copy import deepcopy +from pprint import pprint + +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +from farms_core.io.yaml import read_yaml, write_yaml +from farms_core.options import Options +from farms_network.core import options +from farms_network.core.data import (NetworkConnectivity, NetworkData, + NetworkStates) +from farms_network.core.network import Network +from farms_network.numeric.integrators_cy import RK4Solver +from farms_network.core.options import NetworkOptions +from scipy.integrate import ode +from tqdm import tqdm + + +def linear_network(): + """ Linear stateless network """ + param_opts = options.LinearParameterOptions.defaults() + vis_opts = options.NodeVisualOptions() + + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "network"}, + ) + + network_options.add_node( + options.LinearNodeOptions( + name="node1", + parameters=param_opts, + visual=vis_opts, + ) + ) + return network_options + + +def quadruped_network(): + """ Quadruped network """ + param_opts = options.LIDannerNodeParameterOptions.defaults() + state_opts = options.LINaPDannerStateOptions.from_kwargs(v=0.0, h=-70.0) + vis_opts = options.NodeVisualOptions() + + danner_network = nx.read_graphml( "/Users/tatarama/projects/work/research/neuromechanics/quadruped/mice/mouse-locomotion/data/config/network/siggraph_network.graphml" + ) + + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "network"}, + logs=options.NetworkLogOptions(n_iterations=5000) + ) + + for node, data in danner_network.nodes.items(): + if data["model"] == "li_nap_danner": + network_options.add_node( + options.LINaPDannerNodeOptions( + name=node, + parameters=param_opts, + visual=vis_opts, + state=state_opts, + noise=None, + ) + ) + else: + network_options.add_node( + options.LIDannerNodeOptions( + name=node, + parameters=param_opts, + visual=vis_opts, + state=state_opts, + noise=None, + ) + ) + + for edge, data in danner_network.edges.items(): + network_options.add_edge( + options.EdgeOptions( + source=edge[0], + target=edge[1], + weight=data["weight"], + type=data.get("type", "excitatory"), + visual=options.EdgeVisualOptions(), + ) + ) + return network_options + + +def oscillator_network(): + """ Oscillator network """ + + param_opts = options.OscillatorNodeParameterOptions.defaults(amplitude_rate=10.0, intrinsic_frequency=1) + state_opts = options.OscillatorStateOptions.from_kwargs( + 
phase=0.0, amplitude_0=0.0, amplitude=0.0 + ) + vis_opts = options.NodeVisualOptions() + + network_options = options.NetworkOptions( + directed=True, + multigraph=False, + graph={"name": "network"}, + logs=options.NetworkLogOptions(n_iterations=5000) + ) + + network_options.add_node( + options.OscillatorNodeOptions( + name="O1", + parameters=param_opts, + visual=vis_opts, + state=state_opts, + noise=None, + ) + ) + + network_options.add_node( + options.OscillatorNodeOptions( + name="O2", + parameters=param_opts, + visual=vis_opts, + state=state_opts, + noise=None, + ) + ) + + network_options.add_edge( + options.OscillatorEdgeOptions( + source="O1", + target="O2", + weight=0.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=np.pi/2 + ) + ) + ) + + network_options.add_edge( + options.OscillatorEdgeOptions( + source="O2", + target="O1", + weight=10.0, + type="excitatory", + visual=options.EdgeVisualOptions(), + parameters=options.OscillatorEdgeParameterOptions( + phase_difference=-np.pi/4 + ) + ) + ) + + return network_options + +# network_options = linear_network() + +network_options = oscillator_network() + +# pprint(network_options) + +# network_options = quadruped_network() + +data = NetworkData.from_options(network_options) + +network = Network.from_options(network_options) + +print(network.nnodes, network.nedges) + +rk4solver = RK4Solver(network.nstates, 1e-3) + +integrator = ode(network.evaluate).set_integrator( + u'dopri5', + method=u'adams', + max_step=0.0, + # nsteps=0 +) + +nnodes = len(network_options.nodes) +integrator.set_initial_value(np.zeros(len(data.states.array[0, :]),), 0.0) + +# print("Data ------------", np.array(network.data.states.array)) + +# data.to_file("/tmp/sim.hdf5") + +integrator.integrate(integrator.t + 1e-3) + +# # Integrate +iterations = network_options.logs.buffer_size +states = np.zeros(np.shape(data.states.array))*1.0 +outputs = np.zeros(np.shape(data.outputs.array))*1.0 +# states[0, 2] = -1.0 + +for iteration in tqdm(range(0, iterations), colour='green', ascii=' >='): + time = iteration*1e-3 + integrator.set_initial_value(integrator.y, integrator.t) + # integrator.integrate(integrator.t+1e-3) + + # states[iteration+1, :] = states[iteration, :] + np.array(network.ode(iteration*1e-3, states[iteration, :]))*1e-3 + # network.data.external_inputs.array[:] = np.ones((1,))*np.sin(iteration*1e-3) + # states[iteration+1, :] = rk4(iteration*1e-3, states[iteration, :], network.ode, step_size=1) + # network.evaluate(integrator.t+(iteration*1e-3), states[iteration, :]) + + rk4solver.step(network._network_cy, time, network.data.states.array[0, :]) + + outputs[iteration, :] = network.data.outputs.array[0, :] + states[iteration, :] = network.data.states.array[0, :] + +# plt.plot(np.linspace(0.0, iterations*1e-3, iterations), np.sin(outputs[:, :])) +plt.plot(np.linspace(0.0, iterations*1e-3, iterations), states[:, :]) +plt.show() diff --git a/scratch/test_neuron.py b/scratch/test_neuron.py new file mode 100644 index 0000000..7a12af2 --- /dev/null +++ b/scratch/test_neuron.py @@ -0,0 +1,66 @@ +import numpy as np +from farms_network.core import network, node, options +from farms_network.core.data import NetworkData + + +nstates = 100 +niterations = 1000 + +net_opts = options.NetworkOptions( + logs=options.NetworkLogOptions( + n_iterations=niterations, + ) +) + +data = NetworkData.from_options(net_opts) + + +net = network.Network(nnodes=10) + +n1_opts = options.NodeOptions( + name="n1", + 
parameters=options.NodeParameterOptions(),
+    visual=options.NodeVisualOptions(),
+    state=options.NodeStateOptions(initial=[0, 0]),
+)
+n1 = node.Node.from_options(n1_opts)
+n1_opts.save("/tmp/opts.yaml")
+
+
+print(n1.name)
+n1.name = "n2"
+print(n1.model_type)
+print(n1.name)
+
+states = np.empty((1,))
+dstates = np.empty((1,))
+inputs = np.empty((10,))
+weights = np.empty((10,))
+noise = np.empty((10,))
+drive = 0.0
+
+print(
+    n1.ode_rhs(0.0, states, dstates, inputs, weights, noise, drive)
+)
+
+print(
+    n1.output(0.0, states)
+)
+
+# NOTE: li_danner was not imported above; the module path used here is an
+# assumption based on the new subpackage layout and may need adjusting.
+from farms_network.models import li_danner
+
+n2 = li_danner.PyLIDannerNode("n2", ninputs=50)
+
+print(n2.name)
+print(n2.model_type)
+n2.name = "n2"
+print(n2.name)
+
+states = np.empty((1,))
+dstates = np.empty((1,))
+inputs = np.empty((10,))
+weights = np.empty((10,))
+noise = np.empty((10,))
+drive = 0.0
+
+print(
+    n2.output(0.0, states)
+)
diff --git a/scratch/test_numeric.py b/scratch/test_numeric.py
new file mode 100644
index 0000000..d8a43de
--- /dev/null
+++ b/scratch/test_numeric.py
@@ -0,0 +1,49 @@
+import matplotlib.pyplot as plt
+import numpy as np
+from farms_core.io.yaml import read_yaml
+from farms_network.core import options
+from farms_network.noise.ornstein_uhlenbeck import OrnsteinUhlenbeck
+
+network_options = options.NetworkOptions.from_options(
+    read_yaml("/tmp/network_options.yaml")
+)
+
+n_dim = 0
+for node in network_options.nodes:
+    if node.noise is not None:
+        if (node.noise.model == "ornstein_uhlenbeck") and node.noise.is_stochastic:
+            n_dim += 1
+
+
+timestep = 1e-3
+tau = 1.0
+sigma = 1  # np.sqrt(2.0)
+
+noise_options = [network_options.nodes[1].noise,]
+
+oo = OrnsteinUhlenbeck(noise_options)
+
+times = np.linspace(0, 100000*timestep, int(10000))
+print(np.sqrt(2.0*timestep))
+
+for initial, mean in zip((10.0, 0.0, -10.0, 0.0), (0.0, 0.0, 0.0, -10.0)):
+    states = np.zeros((len(times), 1))
+    states[0, 0] = initial
+    noise_options[0].seed = np.random.randint(low=0, high=10000)
+    noise_options[0].mu = mean
+    noise_options[0].tau = tau
+    noise_options[0].sigma = sigma
+    print(noise_options)
+    oo = OrnsteinUhlenbeck(noise_options)
+    drift = np.zeros((len(times), 1))
+    diffusion = np.zeros((len(times), 1))
+    for index, time in enumerate(times[:-1]):
+        drift[index, :] = oo.py_evaluate_a(time, states[index, :], drift[index, :])
+        diffusion[index, :] = oo.py_evaluate_b(time, states[index, :], diffusion[index, :])
+        states[index+1, :] = states[index, :] + drift[index, :]*timestep + np.sqrt(timestep)*diffusion[index, :]
+    print(np.std(states[500:, 0]), np.mean(states[500:, 0]))
+    plt.plot(times, states[:, 0])
+plt.xlim([0, times[-1]])
+plt.ylim([-15.0, 15.0])
+plt.show()
diff --git a/scratch/test_options.py b/scratch/test_options.py
new file mode 100644
index 0000000..d82b991
--- /dev/null
+++ b/scratch/test_options.py
@@ -0,0 +1,34 @@
+""" Test farms network options """
+
+
+from pprint import pprint
+
+import networkx as nx
+from farms_core.io.yaml import read_yaml, write_yaml
+from farms_core.options import Options
+from farms_network.core import options
+
+param_opts = options.LIDannerParameterOptions.defaults()
+state_opts = options.LIDannerNaPStateOptions.from_kwargs(v0=0.0, h0=-70.0)
+vis_opts = options.NodeVisualOptions()
+
+n1_opts = options.LIDannerNodeOptions(
+    name="n1",
+    parameters=param_opts,
+    visual=vis_opts,
+    state=state_opts,
+)
+
+network = options.NetworkOptions(
+    directed=True,
+    multigraph=False,
+    graph={"name": "network"},
+    nodes=[n1_opts, n1_opts],
+    edges=[],
+)
+
+print(type(network))
+network.save("/tmp/opts.yaml")
+
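+# Round-trip check: reload the YAML written above and confirm it deserializes
+# back into a NetworkOptions instance.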
+pprint(options.NetworkOptions.load("/tmp/opts.yaml")) +print(type(options.NetworkOptions.load("/tmp/opts.yaml"))) diff --git a/setup.py b/setup.py index 76971d3..047c8ad 100644 --- a/setup.py +++ b/setup.py @@ -1,15 +1,9 @@ -from setuptools import setup, dist, find_packages -from setuptools.extension import Extension - -from farms_container import get_include - -dist.Distribution().fetch_build_eggs(['numpy']) import numpy - -dist.Distribution().fetch_build_eggs(['Cython>=0.15.1']) from Cython.Build import cythonize from Cython.Compiler import Options - +from farms_core import get_include_paths +from setuptools import find_packages, setup +from setuptools.extension import Extension DEBUG = False Options.docstrings = True @@ -17,7 +11,7 @@ Options.generate_cleanup_code = False Options.clear_to_none = True Options.annotate = True -Options.fast_fail = False +Options.fast_fail = True Options.warning_errors = False Options.error_on_unknown_names = True Options.error_on_uninitialized = True @@ -33,132 +27,30 @@ # directive_defaults = Cython.Compiler.Options.get_directive_defaults() extensions = [ - Extension("farms_network.network_generator", - ["farms_network/network_generator.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.oscillator", - ["farms_network/oscillator.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.hopf_oscillator", - ["farms_network/hopf_oscillator.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.morphed_oscillator", - ["farms_network/morphed_oscillator.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.leaky_integrator", - ["farms_network/leaky_integrator.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.neuron", - ["farms_network/neuron.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.lif_danner_nap", - ["farms_network/lif_danner_nap.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.lif_danner", - ["farms_network/lif_danner.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.lif_daun_interneuron", - ["farms_network/lif_daun_interneuron.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.hh_daun_motorneuron", - ["farms_network/hh_daun_motorneuron.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.sensory_neuron", - ["farms_network/sensory_neuron.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.fitzhugh_nagumo", - ["farms_network/fitzhugh_nagumo.pyx"], - 
include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.matsuoka_neuron", - ["farms_network/matsuoka_neuron.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.morris_lecar", - ["farms_network/morris_lecar.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.relu", - ["farms_network/relu.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.integrators", - ["farms_network/integrators.pyx"], - include_dirs=[numpy.get_include(), get_include()], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'] - ), - Extension("farms_network.utils.ornstein_uhlenbeck", - ["farms_network/utils/ornstein_uhlenbeck.pyx"], - include_dirs=[numpy.get_include(), get_include()], - # libraries=["c", "stdc++"], - extra_compile_args=['-ffast-math', '-O3'], - extra_link_args=['-O3'], - ) + Extension( + f"farms_network.{subpackage}.*", + [f"farms_network/{subpackage}/*.pyx"], + include_dirs=[numpy.get_include(),], + # libraries=["c", "stdc++"], + extra_compile_args=['-O3',], + extra_link_args=['-O3'], + ) + for subpackage in ('core', 'models', 'noise', 'numeric') ] setup( name='farms_network', version='0.1', - description='Module to generate, develop and visualize neural networks', - url='https://gitlab.com/FARMSIM/farms_network.git', - author="Jonathan Arreguit & Shravan Tata Ramalingasetty", - author_email='biorob-farms@groupes.epfl.ch', - license='Apache-2.0', packages=find_packages(exclude=['tests*']), - install_requires=[ - 'farms_pylog @ git+https://gitlab.com/FARMSIM/farms_pylog.git', - 'tqdm', - 'matplotlib', - 'networkx', - 'pydot', - 'scipy' - ], + package_dir={'farms_network': 'farms_network'}, + package_data={'farms_network': [ + f'{folder}*.pxd' + for folder in ['', 'core/', 'models/', 'numeric/', 'noise/'] + ]}, zip_safe=False, ext_modules=cythonize( extensions, - include_path=[numpy.get_include(), get_include(), 'farms_container'], + include_path=[numpy.get_include()] + get_include_paths(), compiler_directives={ # Directives 'binding': False, @@ -189,8 +81,4 @@ 'warn.multiple_declarators': True, } ), - package_data={ - 'farms_network': ['*.pxd'], - 'farms_container': ['*.pxd'], - }, )