From f7c995600a8ad52ff342720560b64b8d98c79cb5 Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 00:55:40 +0530 Subject: [PATCH 01/29] feat: cppyy codegen backend working changes --- brian2/codegen/__init__.py | 2 +- brian2/codegen/_prefs.py | 5 +- brian2/codegen/generators/cppyy_generator.py | 253 +++++ brian2/codegen/runtime/__init__.py | 14 +- brian2/codegen/runtime/cppyy_rt/__init__.py | 24 + brian2/codegen/runtime/cppyy_rt/cppyy_rt.py | 500 +++++++++ .../runtime/cppyy_rt/extension_manager.py | 963 ++++++++++++++++++ .../cppyy_rt/templates/common_group.cpp | 67 ++ .../cppyy_rt/templates/group_variable_get.cpp | 23 + .../group_variable_get_conditional.cpp | 16 + .../cppyy_rt/templates/group_variable_set.cpp | 12 + .../group_variable_set_conditional.cpp | 16 + .../cppyy_rt/templates/ratemonitor.cpp | 20 + .../runtime/cppyy_rt/templates/reset.cpp | 15 + .../cppyy_rt/templates/statemonitor.cpp | 24 + .../cppyy_rt/templates/stateupdate.cpp | 18 + .../cppyy_rt/templates/summed_variable.cpp | 18 + .../runtime/cppyy_rt/templates/threshold.cpp | 17 + brian2/codegen/targets.py | 4 + brian2/devices/device.py | 50 +- 20 files changed, 2049 insertions(+), 12 deletions(-) create mode 100644 brian2/codegen/generators/cppyy_generator.py create mode 100644 brian2/codegen/runtime/cppyy_rt/__init__.py create mode 100644 brian2/codegen/runtime/cppyy_rt/cppyy_rt.py create mode 100644 brian2/codegen/runtime/cppyy_rt/extension_manager.py create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/group_variable_get.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/group_variable_get_conditional.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/group_variable_set.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/group_variable_set_conditional.cpp create mode 100644 
brian2/codegen/runtime/cppyy_rt/templates/ratemonitor.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/reset.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/stateupdate.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/summed_variable.cpp create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/threshold.cpp diff --git a/brian2/codegen/__init__.py b/brian2/codegen/__init__.py index 4f75b39f8..9971b932e 100644 --- a/brian2/codegen/__init__.py +++ b/brian2/codegen/__init__.py @@ -10,4 +10,4 @@ from . import _prefs from . import cpp_prefs as _cpp_prefs -__all__ = ["NumpyCodeObject", "CythonCodeObject"] +__all__ = ["NumpyCodeObject", "CythonCodeObject", "CppyyCodeObject"] diff --git a/brian2/codegen/_prefs.py b/brian2/codegen/_prefs.py index 6e3273246..638e6303a 100644 --- a/brian2/codegen/_prefs.py +++ b/brian2/codegen/_prefs.py @@ -22,9 +22,12 @@ Can be a string, in which case it should be one of: * ``'auto'`` the default, automatically chose the best code generation - target available. + target available. Priority order: cython > cppyy > numpy. * ``'cython'``, uses the Cython package to generate C++ code. Needs a working installation of Cython and a C++ compiler. + * ``'cppyy'``, uses cppyy for JIT compilation via LLVM/Cling. Needs + cppyy installed but no external C++ compiler. Provides fast in-memory + compilation without filesystem I/O. * ``'numpy'`` works on all platforms and doesn't need a C compiler but is often less efficient. diff --git a/brian2/codegen/generators/cppyy_generator.py b/brian2/codegen/generators/cppyy_generator.py new file mode 100644 index 000000000..b5d66af78 --- /dev/null +++ b/brian2/codegen/generators/cppyy_generator.py @@ -0,0 +1,253 @@ +""" +C++ code generator for the cppyy runtime target. 
+ +Inherits CPPCodeGenerator's full translation pipeline (expressions, the +read→declare→execute→write phases, scalar hoisting, boolean optimization). +Overrides array naming and keyword generation so data arrives from Python +as function parameters rather than global C++ variables. +""" + +from __future__ import annotations + +from typing import Any + +from brian2.codegen.generators.cpp_generator import ( + CPPCodeGenerator, + c_data_type, + stripped_deindented_lines, +) +from brian2.core.functions import DEFAULT_FUNCTIONS, Function +from brian2.core.variables import ( + ArrayVariable, + AuxiliaryVariable, + Constant, + DynamicArrayVariable, + Subexpression, +) + +# (c_type, param_name, namespace_key) +FunctionParam = tuple[str, str, str] + + +def _cppyy_c_data_type(dtype: type | Any) -> str: + """ + Like c_data_type but maps bool→int8_t instead of char. + + cppyy is strict about buffer types: numpy int8 maps to signed char (int8_t), + not char. Using int8_t in the signature lets the buffer protocol match. + The function body still uses char for locals — implicit conversion handles it. + """ + ctype: str = c_data_type(dtype) + if ctype == "char": + return "int8_t" + return ctype + + +class CppyyCodeGenerator(CPPCodeGenerator): + """ + C++ code generator targeting cppyy's JIT runtime. + + All C++ translation logic (expressions, 4-phase pattern, etc.) is inherited. + We only change how arrays are named and how keywords/params are assembled. + """ + + class_name: str = "cppyy" + + @staticmethod + def get_array_name(var: ArrayVariable, access_data: bool = True) -> str: + """ + Globally unique name for an array variable. 
+ + access_data=True → "_ptr_array_{owner}_{name}" (data pointer) + access_data=False → "_dynamic_array_{owner}_{name}" (container object) + """ + owner_name: str = getattr(var.owner, "name", "temporary") + + if isinstance(var, DynamicArrayVariable): + if access_data: + return f"_ptr_array_{owner_name}_{var.name}" + else: + return f"_dynamic_array_{owner_name}_{var.name}" + elif isinstance(var, ArrayVariable): + return f"_ptr_array_{owner_name}_{var.name}" + else: + raise TypeError( + f"get_array_name called with non-array variable: {type(var)}" + ) + + def determine_keywords(self) -> dict[str, Any]: + """ + Build template keywords: function params, support code, hash defines. + + This runs at the end of translate_statement_sequence(). The returned + dict gets merged with scalar_code/vector_code and passed to templates. + + We iterate sorted(self.variables.items()) — the code object's + _build_param_mapping does the same, so parameter order is guaranteed + to match between the signature and the call site. + """ + from brian2.devices.device import get_device + + device: Any = get_device() + + support_code_parts: list[str] = [] + hash_define_parts: list[str] = [] + user_functions: list[Any] = [] + user_func_namespaces: dict[ + str, Any + ] = {} # for setting C++ globals post-compile + added: set[str] = set() + + function_params: list[FunctionParam] = [] + handled_pointers: set[str] = set() + + for varname, var in sorted(self.variables.items()): + if isinstance(var, (AuxiliaryVariable, Subexpression)): + continue + + # --- User functions (TimedArray, BinomialFunction, etc.) 
--- + if isinstance(var, Function): + if self.codeobj_class in var.implementations: + result: tuple | None = self._add_user_function(varname, var, added) + if result is not None: + hd, _pointers, sc, uf = result + hash_define_parts.extend(hd) + support_code_parts.extend(sc) + user_functions.extend(uf) + + # Grab namespace values (actual numpy arrays) for C++ globals + impl = var.implementations[self.codeobj_class] + func_ns: dict[str, Any] | None = impl.get_namespace(self.owner) + if func_ns: + user_func_namespaces.update(func_ns) + continue + + # --- Constants: scalar typed parameters --- + if isinstance(var, Constant): + c_type: str = _cppyy_c_data_type(type(var.value)) + function_params.append((c_type, varname, varname)) + continue + + # --- Array variables: pointer + size parameters --- + if isinstance(var, ArrayVariable): + pointer_name: str = self.get_array_name(var) + if pointer_name in handled_pointers: + continue + handled_pointers.add(pointer_name) + + # Skip multidimensional dynamic arrays (need special handling) + if getattr(var, "ndim", 1) > 1: + continue + + c_type = _cppyy_c_data_type(var.dtype) + namespace_key: str = device.get_array_name(var) + + function_params.append((f"{c_type}*", pointer_name, namespace_key)) + + if not var.scalar: + function_params.append(("int", f"_num{varname}", f"_num{varname}")) + + # Optional denormals flushing (gcc/clang x86) + denormals_code: str = "" + if self.flush_denormals: + denormals_code = """ + #define CSR_FLUSH_TO_ZERO (1 << 15) + unsigned csr = __builtin_ia32_stmxcsr(); + csr |= CSR_FLUSH_TO_ZERO; + __builtin_ia32_ldmxcsr(csr); + """ + + return { + "support_code_lines": "\n".join( + stripped_deindented_lines("\n".join(support_code_parts)) + ), + "hashdefine_lines": "\n".join( + stripped_deindented_lines("\n".join(hash_define_parts)) + ), + "denormals_code_lines": "\n".join( + stripped_deindented_lines(denormals_code) + ), + "function_params": function_params, + "user_func_namespaces": user_func_namespaces, + 
"user_functions": user_functions, + } + + +# --- Function implementations --- +# +# We get sin/cos/exp/log/etc. for free via MRO (registered on CPPCodeGenerator). +# Same for arcsin→asin, int→int_, exprel, TimedArray, BinomialFunction. +# +# We must explicitly register clip/sign/timestep/poisson — they're only on +# CythonCodeGenerator which isn't in our MRO chain. + +_clip_code: str = """ +template +inline T _clip(T value, double a_min, double a_max) { + if (value < (T)a_min) return (T)a_min; + if (value > (T)a_max) return (T)a_max; + return value; +} +""" +DEFAULT_FUNCTIONS["clip"].implementations.add_implementation( + CppyyCodeGenerator, code=_clip_code, name="_clip" +) + +_sign_code: str = """ +template +inline int _sign(T x) { + return (T(0) < x) - (x < T(0)); +} +""" +DEFAULT_FUNCTIONS["sign"].implementations.add_implementation( + CppyyCodeGenerator, code=_sign_code, name="_sign" +) + +_timestep_code: str = """ +inline int64_t _timestep(double t, double dt) { + return (int64_t)((t + 1e-3*dt)/dt); +} +""" +DEFAULT_FUNCTIONS["timestep"].implementations.add_implementation( + CppyyCodeGenerator, code=_timestep_code, name="_timestep" +) + +_poisson_code: str = """ +#include +inline int32_t _poisson(double lam, int _vectorisation_idx) { + std::poisson_distribution _poisson_dist(lam); + return _poisson_dist(_brian_cppyy_rng); +} +""" +DEFAULT_FUNCTIONS["poisson"].implementations.add_implementation( + CppyyCodeGenerator, code=_poisson_code, name="_poisson" +) + +# rand/randn use the shared MT19937 engine from _ensure_support_code() +_rand_support: str = """ +inline double _rand(const int _vectorisation_idx) { + static std::uniform_real_distribution _dist_rand(0.0, 1.0); + return _dist_rand(_brian_cppyy_rng); +} +""" + +_randn_support: str = """ +inline double _randn(const int _vectorisation_idx) { + static std::normal_distribution _dist_randn(0.0, 1.0); + return _dist_randn(_brian_cppyy_rng); +} +""" + 
+DEFAULT_FUNCTIONS["rand"].implementations.add_dynamic_implementation( + CppyyCodeGenerator, + code=lambda owner: {"support_code": _rand_support}, + namespace=lambda owner: {}, + name="_rand", +) + +DEFAULT_FUNCTIONS["randn"].implementations.add_dynamic_implementation( + CppyyCodeGenerator, + code=lambda owner: {"support_code": _randn_support}, + namespace=lambda owner: {}, + name="_randn", +) diff --git a/brian2/codegen/runtime/__init__.py b/brian2/codegen/runtime/__init__.py index 361097246..96aeb6b95 100644 --- a/brian2/codegen/runtime/__init__.py +++ b/brian2/codegen/runtime/__init__.py @@ -2,7 +2,7 @@ Runtime targets for code generation. """ -# Register the base category before importing the indivial codegen targets with +# Register the base category before importing the individual codegen targets with # their subcategories from brian2.core.preferences import prefs @@ -15,12 +15,22 @@ logger = get_logger(__name__) +# Always available from .numpy_rt import * +# Optional: Cython (requires Cython + C++ compiler) try: from .cython_rt import * except ImportError: - pass # todo: raise a warning? + logger.debug("Cython runtime not available", exc_info=True) + +# Optional: cppyy (requires cppyy, no external compiler needed) +try: + from .cppyy_rt import * +except ImportError: + logger.debug("cppyy runtime not available", exc_info=True) + +# Optional: GSL integration try: from .GSLcython_rt import * except ImportError: diff --git a/brian2/codegen/runtime/cppyy_rt/__init__.py b/brian2/codegen/runtime/cppyy_rt/__init__.py new file mode 100644 index 000000000..b48bd2470 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/__init__.py @@ -0,0 +1,24 @@ +""" +cppyy Runtime Backend for Brian2. 
+""" + +from __future__ import annotations + +from brian2.utils.logger import get_logger + +logger = get_logger(__name__) + +try: + from brian2.codegen.runtime.cppyy_rt.cppyy_rt import CppyyCodeObject + from brian2.codegen.targets import codegen_targets + + # Register the target (same pattern as numpy_rt and cython_rt) + codegen_targets.add(CppyyCodeObject) + + __all__ = ["CppyyCodeObject"] + logger.debug("cppyy runtime backend registered") + +except ImportError as e: + logger.debug(f"cppyy runtime backend not available: {e}") + __all__ = [] + CppyyCodeObject = None diff --git a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py new file mode 100644 index 000000000..a29907225 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py @@ -0,0 +1,500 @@ +""" +cppyy runtime code object for Brian2. + +Each code block (before_run, run, after_run) becomes a C++ function JIT-compiled +by cppyy/Cling. Functions receive all data as typed parameters — numpy arrays get +passed as raw pointers with zero-copy via cppyy's buffer protocol. + +Three naming worlds need to stay in sync: + 1. RuntimeDevice: "_array_neurongroup_v" + 2. C++ params: "_ptr_array_neurongroup_v" + 3. C++ body: "_ptr_array_neurongroup_v[_idx]" + +(2) and (3) match automatically. We bridge (1)→(2) in variables_to_namespace(). 
+""" + +from __future__ import annotations + +import importlib.util +from collections.abc import Callable +from typing import Any + +import numpy as np +from numpy.typing import NDArray + +from brian2.core.base import BrianObjectException +from brian2.core.functions import Function +from brian2.core.preferences import BrianPreference, prefs +from brian2.core.variables import ( + ArrayVariable, + AuxiliaryVariable, + Constant, + DynamicArrayVariable, + Subexpression, + Variable, +) +from brian2.utils.logger import get_logger + +from ...codeobject import check_compiler_kwds +from ...generators.cpp_generator import c_data_type +from ...generators.cppyy_generator import CppyyCodeGenerator +from ...targets import codegen_targets +from ...templates import Templater +from ..numpy_rt import NumpyCodeObject + +__all__: list[str] = ["CppyyCodeObject"] + +logger = get_logger(__name__) + +# --- Type aliases --- +# (cpp_param_name, namespace_key, c_type_string) +ParamTuple = tuple[str, str, str] +# (namespace_key, callable that returns current value) +NonconstantEntry = tuple[str, Callable[[], Any]] + +# --- Preferences --- +prefs.register_preferences( + "codegen.runtime.cppyy", + "cppyy runtime codegen preferences", + extra_compile_args=BrianPreference( + default=[], + docs="Extra flags passed to cppyy/Cling, e.g. ['-O2', '-ffast-math'].", + ), +) + +# --- Lazy cppyy import --- +_cppyy: Any = None + + +def _get_cppyy() -> Any: + """Import cppyy on first use so we don't blow up at import time.""" + global _cppyy + if _cppyy is None: + try: + import cppyy + + _cppyy = cppyy + except ImportError: + raise ImportError( + "cppyy is required for the cppyy runtime target. " + "Install it with: pip install cppyy" + ) from None + return _cppyy + + +def _cppyy_c_data_type(dtype: type | np.dtype) -> str: + """ + Like c_data_type but maps bool→int8_t instead of char. 
+ + cppyy enforces strict type matching on buffers: numpy's bool_ viewed as + int8 needs int8_t in the signature, not char (which is a distinct type in C++). + """ + ctype: str = c_data_type(dtype) + if ctype == "char": + return "int8_t" + return ctype + + +# --- One-time support code init --- +_support_code_initialized: bool = False + + +def _ensure_support_code() -> None: + """ + Define universal C++ helpers exactly once in cppyy's interpreter. + + Covers: standard headers, Brian2's _brian_mod/_brian_pow/etc., int_(), + and the shared MT19937 RNG engine. Guarded so repeated calls are no-ops. + """ + global _support_code_initialized + if _support_code_initialized: + return + + cppyy = _get_cppyy() + from brian2.codegen.generators.cpp_generator import _universal_support_code + + guarded_code: str = f""" + #ifndef _BRIAN2_CPPYY_SUPPORT_CODE + #define _BRIAN2_CPPYY_SUPPORT_CODE + + #include + #include + #include + #include + #include + #include + + #ifndef M_PI + #define M_PI 3.14159265358979323846 + #endif + + #ifndef INFINITY + #define INFINITY (std::numeric_limits::infinity()) + #endif + + // Brian2 universal support code: type promotion, _brian_mod, _brian_floordiv, etc. + {_universal_support_code} + + // int_() — standalone gets this from stdint_compat.h, we define it here + template + inline int32_t int_(T value) {{ return static_cast(value); }} + + // Shared RNG for rand/randn/poisson + // TODO: hook into Brian's seed() system for reproducibility + static std::mt19937 _brian_cppyy_rng; + + #endif // _BRIAN2_CPPYY_SUPPORT_CODE + """ + cppyy.cppdef(guarded_code) + _support_code_initialized = True + + +def _make_func_name(codeobj_name: str, block: str) -> str: + """ + Build a deterministic C++ function name from code object + block name. + Must match the Jinja2 template logic in common_group.cpp. 
+ """ + safe: str = codeobj_name.replace(".", "_").replace("*", "").replace("-", "_") + return f"_brian_cppyy_{block}_{safe}" + + +def _cppyy_constant_or_scalar(varname: str, variable: Variable) -> str: + """ + Like constant_or_scalar but uses _ptr_array_X naming to match our C++ params. + + The standard version produces "_array_X[0]" (device naming), but our + function signatures use "_ptr_array_X" (generator naming). + """ + if variable.array: + return f"{CppyyCodeGenerator.get_array_name(variable)}[0]" + else: + return f"{varname}" + + +class CppyyCodeObject(NumpyCodeObject): + """ + Code object that JIT-compiles C++ via cppyy/Cling. + + Inherits NumpyCodeObject's lifecycle but overrides namespace population + to set up _ptr_array_* and _num* entries that our C++ functions expect. + """ + + templater: Templater = Templater( + "brian2.codegen.runtime.cppyy_rt", + ".cpp", + env_globals={ + "c_data_type": _cppyy_c_data_type, + "constant_or_scalar": _cppyy_constant_or_scalar, + }, + ) + generator_class: type = CppyyCodeGenerator + class_name: str = "cppyy" + + def __init__( + self, + owner: Any, + code: Any, + variables: dict[str, Variable], + variable_indices: dict[str, str], + template_name: str, + template_source: str, + compiler_kwds: dict[str, Any], + name: str = "cppyy_code_object*", + ) -> None: + check_compiler_kwds(compiler_kwds, [], "cppyy") + super().__init__( + owner, + code, + variables, + variable_indices, + template_name, + template_source, + compiler_kwds={}, + name=name, + ) + # Populated in compile() — maps block → parameter metadata + self._param_mappings: dict[str, list[ParamTuple]] = {} + # Prevent GC of arrays whose pointers are held by C++ globals + self._namespace_refs: dict[str, NDArray[Any]] = {} + + @classmethod + def is_available(cls) -> bool: + """Check if cppyy is installed without importing it.""" + return importlib.util.find_spec("cppyy") is not None + + # --- Namespace population --- + # + # We override entirely (not calling super) 
because NumpyCodeObject + # doesn't set _num* entries and uses device naming instead of generator naming. + + def variables_to_namespace(self) -> None: + """ + Fill self.namespace with everything the C++ functions need. + + Arrays go under generator naming (_ptr_array_*), sizes under _num*, + constants under their plain name, and Variable objects under _var_*. + """ + self.nonconstant_values: list[NonconstantEntry] = [] + + for name, var in self.variables.items(): + if isinstance(var, Function): + self._insert_func_namespace(var) + continue + + if isinstance(var, (AuxiliaryVariable, Subexpression)): + continue + + # Try to get the value — some dummy Variables don't have one + try: + if not hasattr(var, "get_value"): + raise TypeError() + value: Any = var.get_value() + except (TypeError, AttributeError): + self.namespace[name] = var + continue + + if isinstance(var, ArrayVariable): + gen_name: str = self.generator_class.get_array_name(var) + self.namespace[gen_name] = value + self.namespace[f"_num{name}"] = var.get_len() + + # Scalar constants also get a plain-name entry with the unwrapped value + if var.scalar and var.constant: + self.namespace[name] = value.item() + else: + self.namespace[name] = value + + # Dynamic arrays: store the container object too + if isinstance(var, DynamicArrayVariable): + dyn_name: str = self.generator_class.get_array_name( + var, access_data=False + ) + self.namespace[dyn_name] = self.device.get_value(var, access_data=False) + + self.namespace[f"_var_{name}"] = var + + # Track dynamic arrays that get resized externally (e.g. 
spike monitors) + if isinstance(var, DynamicArrayVariable) and var.needs_reference_update: + gen_name = self.generator_class.get_array_name(var) + self.nonconstant_values.append((gen_name, var.get_value)) + self.nonconstant_values.append((f"_num{name}", var.get_len)) + + def update_namespace(self) -> None: + """Refresh data pointers/sizes for dynamic arrays that may have been resized.""" + for name, func in self.nonconstant_values: + self.namespace[name] = func() + + def _insert_func_namespace(self, func: Function) -> None: + """ + Pull in a function implementation's namespace (e.g. TimedArray data). + Most built-in functions have nothing to inject; this is a no-op for them. + """ + try: + impl = func.implementations[self.__class__] + except KeyError: + return + + func_namespace: dict[str, Any] | None = impl.get_namespace(self.owner) + if func_namespace is not None: + self.namespace.update(func_namespace) + + if impl.dependencies is not None: + for dep in impl.dependencies.values(): + self._insert_func_namespace(dep) + + # --- Parameter mapping --- + # + # Reconstructs the same param list the generator built in determine_keywords(). + # Both iterate sorted(self.variables.items()) with the same filtering, so order matches. + + def _build_param_mapping(self) -> list[ParamTuple]: + """ + Build the (cpp_param_name, namespace_key, c_type) list matching the + C++ function signature order. 
+ """ + params: list[ParamTuple] = [] + handled_pointers: set[str] = set() + + for varname, var in sorted(self.variables.items()): + if isinstance(var, (AuxiliaryVariable, Subexpression)): + continue + if isinstance(var, Function): + continue + + if isinstance(var, Constant): + c_type: str = _cppyy_c_data_type(type(var.value)) + params.append((varname, varname, c_type)) + continue + + if isinstance(var, ArrayVariable): + pointer_name: str = self.generator_class.get_array_name(var) + if pointer_name in handled_pointers: + continue + handled_pointers.add(pointer_name) + + if getattr(var, "ndim", 1) > 1: + continue + + c_type = _cppyy_c_data_type(var.dtype) + namespace_key: str = self.generator_class.get_array_name(var) + + params.append((pointer_name, namespace_key, f"{c_type}*")) + + if not var.scalar: + params.append((f"_num{varname}", f"_num{varname}", "int")) + + return params + + # --- Compilation --- + + def compile_block(self, block: str) -> Any | None: + """ + JIT-compile a code block and wire up any user-function globals. + Returns the compiled function, or None for empty blocks. 
+ """ + code: str = getattr(self.code, block, "").strip() + if not code or "EMPTY_CODE_BLOCK" in code: + return None + + cppyy = _get_cppyy() + _ensure_support_code() + + logger.diagnostic(f"cppyy: compiling '{block}' for {self.name}") + try: + print(f"\n{'=' * 60}") + print(f"CPPYY COMPILE: {self.name} / block={block}") + print(f"{'=' * 60}") + print(code) + print(f"{'=' * 60}\n") + cppyy.cppdef(code) + print("\nCPPYY GLOBAL NAMESPACE:") + print([x for x in dir(cppyy.gbl) if "_brian_" in x]) + except Exception as exc: + raise BrianObjectException( + f"cppyy compilation failed for '{block}' of '{self.name}'.\n" + f"Generated C++ code:\n{code}\n", + self.owner, + ) from exc + + func_name: str = _make_func_name(self.name, block) + try: + compiled_func: Any = getattr(cppyy.gbl, func_name) + except AttributeError: + raise RuntimeError( + f"cppyy compiled OK but function '{func_name}' not found. " + f"Template/name mismatch? codeobj={self.name}, block={block}" + ) from None + + # Wire up static C++ globals for user functions (e.g. TimedArray data pointers) + self._set_user_func_globals(cppyy) + + self._param_mappings[block] = self._build_param_mapping() + print(f"\nPARAM MAPPING for {self.name}.{block}:") + for i, (cpp_name, ns_key, ctype) in enumerate(self._param_mappings[block]): + val = self.namespace.get(ns_key, "MISSING") + if hasattr(val, "shape"): + val_desc = f"ndarray shape={val.shape} dtype={val.dtype}" + elif hasattr(val, "get_size"): + val_desc = f"DynamicArray size={val.get_size()}" + else: + val_desc = f"{type(val).__name__} = {val}" + print(f" [{i}] {ctype:20s} {cpp_name:40s} <- ns[{ns_key}] = {val_desc}") + return compiled_func + + def _set_user_func_globals(self, cppyy: Any) -> None: + """ + Point C++ static globals (e.g. `static double* _namespace_timedarray_values`) + at the actual numpy data. Also pins the arrays to prevent GC. 
+ """ + for _name, var in self.variables.items(): + if not isinstance(var, Function): + continue + try: + impl = var.implementations[self.__class__] + except KeyError: + continue + + func_namespace: dict[str, Any] | None = impl.get_namespace(self.owner) + if not func_namespace: + continue + + for ns_key, ns_value in func_namespace.items(): + if hasattr(ns_value, "dtype") and ns_value.ndim >= 1: + cpp_global_name: str = f"_namespace{ns_key}" + try: + setattr(cppyy.gbl, cpp_global_name, ns_value) + self._namespace_refs[ns_key] = ns_value + logger.diagnostic( + f"cppyy: set global {cpp_global_name} → " + f"array shape {ns_value.shape}" + ) + except AttributeError: + logger.warn( + f"Could not set C++ global '{cpp_global_name}' for " + f"'{ns_key}'. May segfault if the function is called." + ) + + # --- Execution --- + + def run_block(self, block: str) -> None: + """ + Call a compiled C++ function with args extracted from self.namespace. + + cppyy does the numpy→pointer conversion automatically: a float64 array + passed where C++ expects double* gets its buffer pointer extracted with + zero copies. + """ + compiled_func: Any | None = self.compiled_code.get(block) + if compiled_func is None: + return + + try: + param_mapping: list[ParamTuple] = self._param_mappings[block] + args: list[Any] = [] + + for cpp_name, ns_key, c_type in param_mapping: + val: Any = self.namespace.get(ns_key) + + if val is None: + # Naming bridge bug — log and limp along with a zero + logger.warn( + f"Namespace key '{ns_key}' missing for param " + f"'{cpp_name}' ({c_type}) in {self.name}.{block}. " + f"Keys: {sorted(self.namespace.keys())[:20]}..." 
+ ) + if "*" in c_type: + args.append(np.zeros(1, dtype=np.float64)) + else: + args.append(0) + else: + if isinstance(val, np.ndarray): + val = np.ascontiguousarray(val) + # bool arrays need int8 view so cppyy's buffer protocol matches + if val.dtype == np.bool_: + val = val.view(np.int8) + args.append(val) + # print(f"\nCALLING {self.name}.{block} with {len(args)} args:") + # for i, (cpp_name, _, ctype) in enumerate(param_mapping): + # arg = args[i] + # if isinstance(arg, np.ndarray): + # print( + # f" [{i}] {cpp_name}: ndarray({arg.shape}, {arg.dtype}) " + # f"first={arg.flat[0] if arg.size > 0 else 'empty'}" + # ) + # else: + # print(f" [{i}] {cpp_name}: {type(arg).__name__} = {arg}") + compiled_func(*args) + + except Exception as exc: + raise BrianObjectException( + f"Exception during '{block}' of '{self.name}'.\n", + self.owner, + ) from exc + + +codegen_targets.add(CppyyCodeObject) + +# NOTE: rand/randn/clip/sign/timestep/poisson implementations are registered +# on CppyyCodeGenerator (in cppyy_generator.py), not here. This is intentional — +# the generator needs them during code generation, and FunctionImplementationContainer +# finds them via MRO fallback. Registering on both causes shadowing bugs. diff --git a/brian2/codegen/runtime/cppyy_rt/extension_manager.py b/brian2/codegen/runtime/cppyy_rt/extension_manager.py new file mode 100644 index 000000000..c3b59da7a --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/extension_manager.py @@ -0,0 +1,963 @@ +""" +Extension manager for cppyy runtime backend. + +This module provides caching, lifecycle management, and utility functions +for the cppyy-based code generation backend. Unlike the Cython extension +manager which deals with filesystem-based compilation, this manager handles +in-memory compilation and function caching. + +Key responsibilities: +1. Function cache management (in-memory, content-addressed) +2. Compilation lock management (thread safety) +3. Infrastructure initialization (one-time setup) +4. 
@dataclass
class CompilationStats:
    """
    Running counters for cppyy compilation performance.

    Attributes
    ----------
    total_compilations : int
        Total number of compilations performed.
    cache_hits : int
        Times a cached function was reused.
    cache_misses : int
        Times compilation was required.
    total_compile_time : float
        Cumulative time spent compiling (seconds).
    total_code_size : int
        Cumulative size of compiled code (bytes).
    evictions : int
        Functions evicted from the cache.
    errors : int
        Compilation errors observed.
    """

    total_compilations: int = 0
    cache_hits: int = 0
    cache_misses: int = 0
    total_compile_time: float = 0.0
    total_code_size: int = 0
    evictions: int = 0
    errors: int = 0

    def record_compilation(self, compile_time: float, code_size: int) -> None:
        """Account for one successful compilation (also counted as a miss)."""
        self.total_compilations += 1
        self.cache_misses += 1
        self.total_compile_time += compile_time
        self.total_code_size += code_size

    def record_cache_hit(self) -> None:
        """Account for one cache hit."""
        self.cache_hits += 1

    def record_eviction(self) -> None:
        """Account for one cache eviction."""
        self.evictions += 1

    def record_error(self) -> None:
        """Account for one compilation error."""
        self.errors += 1

    @property
    def hit_rate(self) -> float:
        """Fraction of lookups served from cache (0.0 before any lookup)."""
        lookups = self.cache_hits + self.cache_misses
        return self.cache_hits / lookups if lookups else 0.0

    @property
    def average_compile_time(self) -> float:
        """Mean compilation time in seconds (0.0 before any compilation)."""
        if not self.total_compilations:
            return 0.0
        return self.total_compile_time / self.total_compilations

    def __str__(self) -> str:
        return (
            f"CompilationStats(\n"
            f" compilations={self.total_compilations},\n"
            f" cache_hits={self.cache_hits},\n"
            f" cache_misses={self.cache_misses},\n"
            f" hit_rate={self.hit_rate:.1%},\n"
            f" avg_compile_time={self.average_compile_time * 1000:.1f}ms,\n"
            f" total_code_size={self.total_code_size / 1024:.1f}KB,\n"
            f" evictions={self.evictions},\n"
            f" errors={self.errors}\n"
            f")"
        )
+ created_at: Timestamp when the function was compiled. + last_used: Timestamp when the function was last called. + use_count: Number of times the function has been called. + code_size: Size of the source code in bytes. + """ + + func: Any + code_hash: str + function_name: str + created_at: float = field(default_factory=time.time) + last_used: float = field(default_factory=time.time) + use_count: int = 0 + code_size: int = 0 + + def touch(self) -> None: + """Update last_used timestamp and increment use count.""" + self.last_used = time.time() + self.use_count += 1 + + +class CppyyFunctionCache: + """ + Thread-safe LRU cache for compiled cppyy functions. + + This cache stores compiled C++ functions keyed by a hash of their source + code. It uses an LRU (Least Recently Used) eviction policy when the cache + exceeds its maximum size. + + The cache is designed to be shared across all CppyyCodeObject instances + within a process to maximize code reuse. + + Thread Safety: + All public methods are thread-safe and protected by a reentrant lock. + + Attributes: + max_size: Maximum number of functions to cache. + _cache: OrderedDict mapping code hashes to CachedFunction objects. + _lock: Threading lock for thread-safe access. + _stats: Compilation statistics tracker. + """ + + def __init__(self, max_size: int = 1000) -> None: + """ + Initialize the function cache. + + Args: + max_size: Maximum number of functions to cache. When exceeded, + least recently used functions are evicted. 
+ """ + self._max_size = max_size + self._cache: OrderedDict[str, CachedFunction] = OrderedDict() + self._lock = threading.RLock() + self._stats = CompilationStats() + + # Reverse lookup: function name -> code hash + self._name_to_hash: dict[str, str] = {} + + @property + def max_size(self) -> int: + """Get the maximum cache size.""" + return self._max_size + + @max_size.setter + def max_size(self, value: int) -> None: + """Set the maximum cache size, evicting if necessary.""" + with self._lock: + self._max_size = value + self._evict_if_needed() + + def get(self, code_hash: str) -> Any | None: + """ + Get a cached function by its code hash. + + Args: + code_hash: SHA256 hash of the C++ source code. + + Returns: + The compiled function proxy, or None if not found. + """ + with self._lock: + if code_hash in self._cache: + cached = self._cache[code_hash] + cached.touch() + # Move to end (most recently used) + self._cache.move_to_end(code_hash) + self._stats.record_cache_hit() + return cached.func + return None + + def put( + self, + code_hash: str, + func: Any, + function_name: str, + code_size: int = 0, + ) -> None: + """ + Store a compiled function in the cache. + + Args: + code_hash: SHA256 hash of the C++ source code. + func: The compiled cppyy function proxy. + function_name: Name of the C++ function. + code_size: Size of the source code in bytes. 
+ """ + with self._lock: + # Remove existing entry if present + if code_hash in self._cache: + old_entry = self._cache.pop(code_hash) + if old_entry.function_name in self._name_to_hash: + del self._name_to_hash[old_entry.function_name] + + # Evict if necessary + self._evict_if_needed() + + # Add new entry + cached = CachedFunction( + func=func, + code_hash=code_hash, + function_name=function_name, + code_size=code_size, + ) + self._cache[code_hash] = cached + self._name_to_hash[function_name] = code_hash + + def _evict_if_needed(self) -> None: + """Evict least recently used entries if cache is over capacity.""" + while len(self._cache) >= self._max_size: + # Pop the oldest entry (first item in OrderedDict) + code_hash, cached = self._cache.popitem(last=False) + if cached.function_name in self._name_to_hash: + del self._name_to_hash[cached.function_name] + self._stats.record_eviction() + logger.debug(f"Evicted cached function: {cached.function_name}") + + def contains(self, code_hash: str) -> bool: + """Check if a code hash is in the cache.""" + with self._lock: + return code_hash in self._cache + + def get_by_name(self, function_name: str) -> Any | None: + """ + Get a cached function by its function name. + + Args: + function_name: Name of the C++ function. + + Returns: + The compiled function proxy, or None if not found. + """ + with self._lock: + code_hash = self._name_to_hash.get(function_name) + if code_hash is not None: + return self.get(code_hash) + return None + + def clear(self) -> None: + """Clear all cached functions.""" + with self._lock: + self._cache.clear() + self._name_to_hash.clear() + logger.debug("Function cache cleared") + + def __len__(self) -> int: + """Return the number of cached functions.""" + with self._lock: + return len(self._cache) + + @property + def stats(self) -> CompilationStats: + """Get compilation statistics.""" + return self._stats + + def get_info(self) -> dict[str, Any]: + """ + Get detailed information about the cache state. 
+ + Returns: + Dictionary with cache information. + """ + with self._lock: + entries = [] + for code_hash, cached in self._cache.items(): + entries.append( + { + "function_name": cached.function_name, + "code_hash": code_hash[:16] + "...", + "use_count": cached.use_count, + "code_size": cached.code_size, + "age_seconds": time.time() - cached.created_at, + } + ) + + return { + "size": len(self._cache), + "max_size": self._max_size, + "stats": str(self._stats), + "entries": entries, + } + + +# ============================================================================= +# Infrastructure Manager +# ============================================================================= + + +class CppyyInfrastructure: + """ + Singleton managing cppyy initialization and shared C++ infrastructure. + + This class handles one-time loading of: + - Standard C++ headers + - Brian2 common macros and type definitions + - Random number generator infrastructure + - DynamicArray and SpikeQueue implementations + + The infrastructure is initialized lazily on first use and shared + across all CppyyCodeObject instances. + + Thread Safety: + Initialization is protected by a lock to prevent race conditions. + """ + + _instance: CppyyInfrastructure | None = None + _initialized: bool = False + _lock = threading.Lock() + + def __new__(cls) -> CppyyInfrastructure: + """Ensure singleton pattern.""" + if cls._instance is None: + with cls._lock: + if cls._instance is None: + cls._instance = super().__new__(cls) + return cls._instance + + def __init__(self) -> None: + """Initialize the infrastructure (only runs once).""" + pass # Actual init is in ensure_initialized() + + def ensure_initialized(self) -> None: + """ + Ensure the cppyy infrastructure is initialized. + + This method is idempotent and thread-safe. It only performs + initialization once, even if called multiple times. 
+ """ + if CppyyInfrastructure._initialized: + return + + with CppyyInfrastructure._lock: + if CppyyInfrastructure._initialized: + return + + self._load_infrastructure() + CppyyInfrastructure._initialized = True + + def _load_infrastructure(self) -> None: + """Load all required C++ infrastructure into cppyy.""" + import cppyy + + logger.debug("Initializing cppyy infrastructure...") + start_time = time.time() + + # Load standard headers + cppyy.include("") + cppyy.include("") + cppyy.include("") + cppyy.include("") + cppyy.include("") + cppyy.include("") + cppyy.include("") + + # Define common infrastructure + cppyy.cppdef(self._get_common_definitions()) + cppyy.cppdef(self._get_dynamic_array_definitions()) + cppyy.cppdef(self._get_spike_queue_definitions()) + cppyy.cppdef(self._get_random_definitions()) + + elapsed = time.time() - start_time + logger.debug(f"cppyy infrastructure initialized in {elapsed * 1000:.1f}ms") + + def _get_common_definitions(self) -> str: + """Get common C++ definitions.""" + return """ +#ifndef BRIAN2_CPPYY_COMMON +#define BRIAN2_CPPYY_COMMON + +#include +#include +#include + +namespace brian2_cppyy { + +// Integer types +using std::int8_t; +using std::int16_t; +using std::int32_t; +using std::int64_t; +using std::uint8_t; +using std::uint16_t; +using std::uint32_t; +using std::uint64_t; +using std::size_t; + +// Clip function +template +inline T _clip(T value, T min_val, T max_val) { + return std::min(std::max(value, min_val), max_val); +} + +// Integer division (floor division matching Python) +inline int64_t _floordiv(int64_t a, int64_t b) { + int64_t q = a / b; + int64_t r = a % b; + if ((r != 0) && ((r < 0) != (b < 0))) { + q -= 1; + } + return q; +} + +// Modulo matching Python semantics +inline int64_t _mod(int64_t a, int64_t b) { + int64_t r = a % b; + if ((r != 0) && ((r < 0) != (b < 0))) { + r += b; + } + return r; +} + +// Sign function +template +inline int _sign(T val) { + return (T(0) < val) - (val < T(0)); +} + +// Boolean 
conversion +inline int _bool_to_int(bool b) { + return b ? 1 : 0; +} + +} // namespace brian2_cppyy + +#endif // BRIAN2_CPPYY_COMMON +""" + + def _get_dynamic_array_definitions(self) -> str: + """Get DynamicArray C++ definitions.""" + return """ +#ifndef BRIAN2_CPPYY_DYNAMIC_ARRAY +#define BRIAN2_CPPYY_DYNAMIC_ARRAY + +#include +#include +#include + +namespace brian2_cppyy { + +template +class DynamicArray1D { +private: + std::vector _data; + +public: + DynamicArray1D() = default; + explicit DynamicArray1D(size_t size) : _data(size) {} + DynamicArray1D(size_t size, T value) : _data(size, value) {} + + // Data access + T* data() noexcept { return _data.data(); } + const T* data() const noexcept { return _data.data(); } + + // Size operations + size_t size() const noexcept { return _data.size(); } + bool empty() const noexcept { return _data.empty(); } + void resize(size_t new_size) { _data.resize(new_size); } + void resize(size_t new_size, T value) { _data.resize(new_size, value); } + void reserve(size_t capacity) { _data.reserve(capacity); } + void clear() { _data.clear(); } + + // Element access + T& operator[](size_t idx) { return _data[idx]; } + const T& operator[](size_t idx) const { return _data[idx]; } + T& at(size_t idx) { return _data.at(idx); } + const T& at(size_t idx) const { return _data.at(idx); } + + // Modification + void push_back(const T& value) { _data.push_back(value); } + void push_back(T&& value) { _data.push_back(std::move(value)); } + void pop_back() { _data.pop_back(); } + + // Iterators + typename std::vector::iterator begin() { return _data.begin(); } + typename std::vector::iterator end() { return _data.end(); } + typename std::vector::const_iterator begin() const { return _data.begin(); } + typename std::vector::const_iterator end() const { return _data.end(); } +}; + +template +class DynamicArray2D { +private: + std::vector> _data; + size_t _cols; + +public: + DynamicArray2D() : _cols(0) {} + explicit DynamicArray2D(size_t rows) : 
_data(rows), _cols(0) {} + DynamicArray2D(size_t rows, size_t cols) + : _data(rows, std::vector(cols)), _cols(cols) {} + DynamicArray2D(size_t rows, size_t cols, T value) + : _data(rows, std::vector(cols, value)), _cols(cols) {} + + // Row access + std::vector& operator[](size_t idx) { return _data[idx]; } + const std::vector& operator[](size_t idx) const { return _data[idx]; } + + // Size operations + size_t size() const noexcept { return _data.size(); } + size_t rows() const noexcept { return _data.size(); } + size_t cols() const noexcept { return _cols; } + bool empty() const noexcept { return _data.empty(); } + + void resize(size_t new_rows) { + _data.resize(new_rows); + for (auto& row : _data) { + row.resize(_cols); + } + } + + void resize(size_t new_rows, size_t new_cols) { + _cols = new_cols; + _data.resize(new_rows); + for (auto& row : _data) { + row.resize(new_cols); + } + } + + void clear() { + _data.clear(); + _cols = 0; + } +}; + +} // namespace brian2_cppyy + +#endif // BRIAN2_CPPYY_DYNAMIC_ARRAY +""" + + def _get_spike_queue_definitions(self) -> str: + """Get SpikeQueue C++ definitions.""" + return """ +#ifndef BRIAN2_CPPYY_SPIKE_QUEUE +#define BRIAN2_CPPYY_SPIKE_QUEUE + +#include +#include +#include + +namespace brian2_cppyy { + +class SpikeQueue { +private: + std::vector> _queue; + size_t _current_idx; + size_t _n_delays; + int32_t* _delays; + size_t _n_synapses; + int32_t _source_start; + int32_t _source_end; + +public: + SpikeQueue() + : _current_idx(0) + , _n_delays(1) + , _delays(nullptr) + , _n_synapses(0) + , _source_start(0) + , _source_end(0) + { + _queue.resize(1); + } + + SpikeQueue(int32_t source_start, int32_t source_end) + : _current_idx(0) + , _n_delays(1) + , _delays(nullptr) + , _n_synapses(0) + , _source_start(source_start) + , _source_end(source_end) + { + _queue.resize(1); + } + + void prepare(int32_t* delays, size_t n_delays, size_t n_synapses) { + _delays = delays; + _n_delays = n_delays > 0 ? 
n_delays : 1; + _n_synapses = n_synapses; + + _queue.clear(); + _queue.resize(_n_delays); + _current_idx = 0; + } + + void push(int32_t* spike_indices, int n_spikes) { + if (n_spikes == 0) return; + + // Simple implementation: push all spikes to current slot + // Full implementation would use delays + auto& current = _queue[_current_idx % _n_delays]; + for (int i = 0; i < n_spikes; ++i) { + current.push_back(spike_indices[i]); + } + } + + std::vector* peek() { + return &_queue[_current_idx % _n_delays]; + } + + const std::vector* peek() const { + return &_queue[_current_idx % _n_delays]; + } + + void advance() { + _queue[_current_idx % _n_delays].clear(); + _current_idx++; + } + + size_t size() const { + return _queue[_current_idx % _n_delays].size(); + } + + void clear() { + for (auto& slot : _queue) { + slot.clear(); + } + _current_idx = 0; + } +}; + +} // namespace brian2_cppyy + +#endif // BRIAN2_CPPYY_SPIKE_QUEUE +""" + + def _get_random_definitions(self) -> str: + """Get random number generator C++ definitions.""" + return """ +#ifndef BRIAN2_CPPYY_RANDOM +#define BRIAN2_CPPYY_RANDOM + +#include +#include + +namespace brian2_cppyy { + +// Thread-local random engine for each compilation unit +thread_local std::mt19937_64 _rng; +thread_local bool _rng_seeded = false; + +inline void seed_rng(uint64_t seed) { + _rng.seed(seed); + _rng_seeded = true; +} + +inline void ensure_rng_seeded() { + if (!_rng_seeded) { + std::random_device rd; + _rng.seed(rd()); + _rng_seeded = true; + } +} + +inline double _rand() { + ensure_rng_seeded(); + std::uniform_real_distribution dist(0.0, 1.0); + return dist(_rng); +} + +inline double _randn() { + ensure_rng_seeded(); + std::normal_distribution dist(0.0, 1.0); + return dist(_rng); +} + +inline int32_t _poisson(double lambda) { + ensure_rng_seeded(); + if (lambda <= 0) return 0; + std::poisson_distribution dist(lambda); + return dist(_rng); +} + +inline double _rand_uniform(double low, double high) { + ensure_rng_seeded(); + 
std::uniform_real_distribution dist(low, high); + return dist(_rng); +} + +inline int32_t _rand_int(int32_t low, int32_t high) { + ensure_rng_seeded(); + std::uniform_int_distribution dist(low, high - 1); + return dist(_rng); +} + +inline double _rand_exponential(double beta) { + ensure_rng_seeded(); + std::exponential_distribution dist(1.0 / beta); + return dist(_rng); +} + +inline double _rand_gamma(double alpha, double beta) { + ensure_rng_seeded(); + std::gamma_distribution dist(alpha, beta); + return dist(_rng); +} + +} // namespace brian2_cppyy + +#endif // BRIAN2_CPPYY_RANDOM +""" + + @property + def is_initialized(self) -> bool: + """Check if infrastructure is initialized.""" + return CppyyInfrastructure._initialized + + @classmethod + def reset(cls) -> None: + """ + Reset the infrastructure state (mainly for testing). + + Warning: This does not unload the C++ definitions from cppyy, + as that's not possible. It only resets the initialization flag. + """ + with cls._lock: + cls._initialized = False + cls._instance = None + + +# ============================================================================= +# Extension Manager +# ============================================================================= + + +class CppyyExtensionManager: + """ + Central manager for the cppyy runtime backend. + + This class coordinates: + - Function caching + - Infrastructure initialization + - Compilation with thread safety + - Statistics and diagnostics + + It provides a high-level interface for CppyyCodeObject to use + for compiling and caching C++ code. 
+ """ + + _instance: CppyyExtensionManager | None = None + _lock = threading.Lock() + + def __new__(cls) -> CppyyExtensionManager: + """Ensure singleton pattern.""" + if cls._instance is None: + with cls._lock: + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._initialized = False + return cls._instance + + def __init__(self) -> None: + """Initialize the extension manager.""" + if self._initialized: + return + + from brian2.core.preferences import prefs + + # Get cache size from preferences (with fallback) + try: + cache_size = prefs["codegen.runtime.cppyy.cache_size"] + except KeyError: + cache_size = 1000 + + self._cache = CppyyFunctionCache(max_size=cache_size) + self._infrastructure = CppyyInfrastructure() + self._compile_lock = threading.Lock() + self._initialized = True + + @property + def cache(self) -> CppyyFunctionCache: + """Get the function cache.""" + return self._cache + + @property + def infrastructure(self) -> CppyyInfrastructure: + """Get the infrastructure manager.""" + return self._infrastructure + + @property + def stats(self) -> CompilationStats: + """Get compilation statistics.""" + return self._cache.stats + + def ensure_initialized(self) -> None: + """Ensure infrastructure is initialized.""" + self._infrastructure.ensure_initialized() + + def compile( + self, + code: str, + function_name: str, + force: bool = False, + ) -> Any: + """ + Compile C++ code and return the function proxy. + + This method handles: + 1. Computing code hash + 2. Checking cache + 3. Thread-safe compilation + 4. Caching the result + + Args: + code: C++ source code to compile. + function_name: Name of the function in the code. + force: If True, recompile even if cached. + + Returns: + The compiled cppyy function proxy. + + Raises: + RuntimeError: If compilation fails. 
+ """ + import cppyy + + # Ensure infrastructure is ready + self.ensure_initialized() + + # Compute hash + code_hash = hashlib.sha256(code.encode()).hexdigest() + + # Check cache (unless forced) + if not force: + cached = self._cache.get(code_hash) + if cached is not None: + logger.debug(f"Cache hit for {function_name}") + return cached + + # Compile with lock + with self._compile_lock: + # Double-check cache (another thread may have compiled) + if not force: + cached = self._cache.get(code_hash) + if cached is not None: + return cached + + logger.debug(f"Compiling {function_name}...") + start_time = time.time() + + try: + cppyy.cppdef(code) + func = getattr(cppyy.gbl, function_name) + except Exception as e: + self._cache.stats.record_error() + raise RuntimeError( + f"Failed to compile {function_name}: {e}\n" + f"Code:\n{self._format_code(code)}" + ) from e + + compile_time = time.time() - start_time + code_size = len(code.encode()) + + # Update stats + self._cache.stats.record_compilation(compile_time, code_size) + + # Cache the result + self._cache.put(code_hash, func, function_name, code_size) + + logger.debug( + f"Compiled {function_name} in {compile_time * 1000:.1f}ms " + f"({code_size} bytes)" + ) + + return func + + def _format_code(self, code: str, max_lines: int = 50) -> str: + """Format code with line numbers for error messages.""" + lines = code.split("\n") + if len(lines) > max_lines: + # Show first and last portions + half = max_lines // 2 + formatted_lines = [] + for i, line in enumerate(lines[:half], 1): + formatted_lines.append(f"{i:4d} | {line}") + formatted_lines.append( + f" | ... ({len(lines) - max_lines} lines omitted) ..." 
+ ) + for i, line in enumerate(lines[-half:], len(lines) - half + 1): + formatted_lines.append(f"{i:4d} | {line}") + return "\n".join(formatted_lines) + else: + return "\n".join(f"{i:4d} | {line}" for i, line in enumerate(lines, 1)) + + def clear_cache(self) -> None: + """Clear the function cache.""" + self._cache.clear() + + def get_diagnostics(self) -> dict[str, Any]: + """ + Get diagnostic information about the extension manager. + + Returns: + Dictionary with diagnostic information. + """ + return { + "infrastructure_initialized": self._infrastructure.is_initialized, + "cache_info": self._cache.get_info(), + "stats": str(self._cache.stats), + } + + +# ============================================================================= +# Module-level Access +# ============================================================================= + +# Global extension manager instance +_extension_manager: CppyyExtensionManager | None = None + + +def get_extension_manager() -> CppyyExtensionManager: + """ + Get the global extension manager instance. + + Returns: + The singleton CppyyExtensionManager instance. 
+ """ + global _extension_manager + if _extension_manager is None: + _extension_manager = CppyyExtensionManager() + return _extension_manager diff --git a/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp b/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp new file mode 100644 index 000000000..6fe24a5eb --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp @@ -0,0 +1,67 @@ + +{% set _safe_name = codeobj_name | replace(".", "_") | replace("*", "") | replace("-", "_") %} + +{# ── Helper: build the parameter list for a C++ function signature ── #} +{% macro param_list() %} +{% for c_type, param_name, ns_key in function_params %}{{ c_type }} {{ param_name }}{% if not loop.last %}, {% endif %}{% endfor %} +{% endmacro %} + + +{# ══════════════════════════════════════════════════════════════════════ #} +{# BLOCK: before_run — runs once before simulation starts #} +{# ══════════════════════════════════════════════════════════════════════ #} +{% macro before_run() %} +{% set _func_name = "_brian_cppyy_before_run_" + _safe_name %} + +// Per-codeobject support code (user functions, hashdefines) +{{ hashdefine_lines }} +{{ support_code_lines }} + +extern "C" void {{ _func_name }}({{ param_list() }}) { + {{ denormals_code_lines }} + {% block before_code %} + // EMPTY_CODE_BLOCK + {% endblock %} +} +{% endmacro %} + + +{# ══════════════════════════════════════════════════════════════════════ #} +{# BLOCK: run — the main simulation step, runs every timestep #} +{# ══════════════════════════════════════════════════════════════════════ #} +{% macro run() %} +{% set _func_name = "_brian_cppyy_run_" + _safe_name %} + +// Per-codeobject support code +{{ hashdefine_lines }} +{{ support_code_lines }} + +// Template-specific support code (e.g. 
synaptic queue access) +{% block template_support_code %} +{% endblock %} + +extern "C" void {{ _func_name }}({{ param_list() }}) { + {{ denormals_code_lines }} + {% block maincode %} + {% endblock %} +} +{% endmacro %} + + +{# ══════════════════════════════════════════════════════════════════════ #} +{# BLOCK: after_run — runs once after simulation completes #} +{# ══════════════════════════════════════════════════════════════════════ #} +{% macro after_run() %} +{% set _func_name = "_brian_cppyy_after_run_" + _safe_name %} + +// Per-codeobject support code +{{ hashdefine_lines }} +{{ support_code_lines }} + +extern "C" void {{ _func_name }}({{ param_list() }}) { + {{ denormals_code_lines }} + {% block after_code %} + // EMPTY_CODE_BLOCK + {% endblock %} +} +{% endmacro %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get.cpp b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get.cpp new file mode 100644 index 000000000..ecb98d733 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get.cpp @@ -0,0 +1,23 @@ +{# Get variable values template for cppyy backend #} +{# USES_VARIABLES { _group_idx } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + //// MAIN CODE //////////// + {% set c_type = cpp_dtype(variables['_variable'].dtype) %} + + const size_t _vectorisation_idx = 1; + const int _num_indices = _num{{ _group_idx }}; + + // Allocate output array (returned via pointer parameter) + {{ scalar_code | autoindent }} + + for (int _idx_group_idx = 0; _idx_group_idx < _num_indices; _idx_group_idx++) { + const int _idx = {{ _group_idx }}[_idx_group_idx]; + const size_t _vectorisation_idx = _idx; + + {{ vector_code | autoindent }} + + _output[_idx_group_idx] = _variable; + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get_conditional.cpp b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get_conditional.cpp new file mode 100644 index 000000000..9bb81b807 
--- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_get_conditional.cpp @@ -0,0 +1,16 @@ +{# USES_VARIABLES { _group_idx } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + + // Note: for cppyy runtime, _return_values handling needs special care. + // The numpy code object uses exec() which can set variables in a namespace. + // For cppyy we compute into an output array parameter instead. + for (int _idx_group_idx = 0; _idx_group_idx < (int)_num_group_idx; _idx_group_idx++) { + const size_t _idx = {{ _group_idx }}[_idx_group_idx]; + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set.cpp b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set.cpp new file mode 100644 index 000000000..4a137aed6 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set.cpp @@ -0,0 +1,12 @@ +{# USES_VARIABLES { _group_idx } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + for (int _idx_group_idx = 0; _idx_group_idx < (int)_num_group_idx; _idx_group_idx++) { + const size_t _idx = {{ _group_idx }}[_idx_group_idx]; + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set_conditional.cpp b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set_conditional.cpp new file mode 100644 index 000000000..6f7349b95 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/group_variable_set_conditional.cpp @@ -0,0 +1,16 @@ +{# USES_VARIABLES { N } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + const size_t _vectorisation_idx = -1; + {{ scalar_code['condition'] | autoindent }} + {{ scalar_code['statement'] | autoindent }} + 
const int _N = {{ constant_or_scalar('N', variables['N']) }}; + for (int _idx = 0; _idx < _N; _idx++) { + const size_t _vectorisation_idx = _idx; + {{ vector_code['condition'] | autoindent }} + if (_cond) { + {{ vector_code['statement'] | autoindent }} + } + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/ratemonitor.cpp b/brian2/codegen/runtime/cppyy_rt/templates/ratemonitor.cpp new file mode 100644 index 000000000..435c4a400 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/ratemonitor.cpp @@ -0,0 +1,20 @@ +{# Rate monitor template for cppyy backend #} +{# USES_VARIABLES { _clock_t, _spikespace, _rate, _t, _num_source_neurons } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + //// MAIN CODE //////////// + {% set _eventspace = get_array_name(eventspace_variable) %} + + const double _current_t = {{ _clock_t }}; + const int _num_spikes = {{ _eventspace }}[_num{{ _eventspace }} - 1]; + const double _dt = {{ dt }}; + const int _source_neurons = {{ _num_source_neurons }}; + + // Calculate instantaneous firing rate + const double _current_rate = (double)_num_spikes / (_source_neurons * _dt); + + // Append to dynamic arrays + {{ _dynamic_t }}.push_back(_current_t); + {{ _dynamic_rate }}.push_back(_current_rate); +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/reset.cpp b/brian2/codegen/runtime/cppyy_rt/templates/reset.cpp new file mode 100644 index 000000000..701ff8c93 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/reset.cpp @@ -0,0 +1,15 @@ +{# USES_VARIABLES { N } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + {% set _eventspace = get_array_name(eventspace_variable) %} + const int32_t* _events = {{ _eventspace }}; + const int32_t _num_events = {{ _eventspace }}[{{ constant_or_scalar('N', variables['N']) }}]; + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + for (int32_t _index_events = 0; _index_events < _num_events; _index_events++) { + 
const size_t _idx = _events[_index_events]; + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp b/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp new file mode 100644 index 000000000..4f0874b49 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp @@ -0,0 +1,24 @@ +{# State monitor template for cppyy backend #} +{# USES_VARIABLES { _clock_t, _indices, N } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + //// MAIN CODE //////////// + const double _current_t = {{ _clock_t }}; + const int _num_indices = _num{{ _indices }}; + + // Record time + {{ _dynamic_t }}.push_back(_current_t); + + // Record state variables for each monitored index + for (int _i = 0; _i < _num_indices; _i++) { + const int _idx = {{ _indices }}[_i]; + const size_t _vectorisation_idx = _idx; + + {% for varname in record_variables %} + // Record {{ varname }} + {{ vector_code[varname] | autoindent }} + {{ _dynamic_ ~ varname }}.push_back(_to_record_{{ varname }}); + {% endfor %} + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/stateupdate.cpp b/brian2/codegen/runtime/cppyy_rt/templates/stateupdate.cpp new file mode 100644 index 000000000..654ac7bea --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/stateupdate.cpp @@ -0,0 +1,18 @@ +{# ITERATE_ALL { _idx } #} +{# USES_VARIABLES { N } #} +{# ALLOWS_SCALAR_WRITE #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + // scalar code (runs once, outside the loop) + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + + const int _N = {{ constant_or_scalar('N', variables['N']) }}; + + // vector code (runs per neuron) + for (int _idx = 0; _idx < _N; _idx++) { + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/summed_variable.cpp 
b/brian2/codegen/runtime/cppyy_rt/templates/summed_variable.cpp new file mode 100644 index 000000000..434561739 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/summed_variable.cpp @@ -0,0 +1,18 @@ +{# USES_VARIABLES { N } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + {% set _target_var_array = get_array_name(_target_var) %} + {% set _index_array = get_array_name(_index_var) %} + const int _target_size = {{ constant_or_scalar(_target_size_name, variables[_target_size_name]) }}; + for (int _target_idx = 0; _target_idx < _target_size; _target_idx++) { + {{ _target_var_array }}[_target_idx + {{ _target_start }}] = 0; + } + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + for (int _idx = 0; _idx < {{ N }}; _idx++) { + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + {{ _target_var_array }}[{{ _index_array }}[_idx]] += _synaptic_var; + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/threshold.cpp b/brian2/codegen/runtime/cppyy_rt/templates/threshold.cpp new file mode 100644 index 000000000..fcbfe84a6 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/threshold.cpp @@ -0,0 +1,17 @@ +{# USES_VARIABLES { N, _spikespace } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} + const int _N = {{ constant_or_scalar('N', variables['N']) }}; + long _count = 0; + for (int _idx = 0; _idx < _N; _idx++) { + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + if (_cond) { + {{ _spikespace }}[_count++] = _idx; + } + } + {{ _spikespace }}[_N] = _count; +{% endblock %} diff --git a/brian2/codegen/targets.py b/brian2/codegen/targets.py index 7f56d5f05..158b57208 100644 --- a/brian2/codegen/targets.py +++ b/brian2/codegen/targets.py @@ -5,4 +5,8 @@ __all__ = ["codegen_targets"] # This should be filled in by subpackages +# +#: Set of all registered code generation target 
classes. +#: Each target is a CodeObject subclass with a `class_name` attribute. +#: Targets register themselves by calling codegen_targets.add(TargetClass) codegen_targets = set() diff --git a/brian2/devices/device.py b/brian2/devices/device.py index ea6298757..4a9d64978 100644 --- a/brian2/devices/device.py +++ b/brian2/devices/device.py @@ -44,9 +44,11 @@ def auto_target(): """ - Automatically chose a code generation target (invoked when the - `codegen.target` preference is set to `'auto'`. Caches its result so it - only does the check once. Prefers cython > numpy. + Automatically choose a code generation target (invoked when the + `codegen.target` preference is set to `'auto'`). Caches its result so it + only does the check once. + + Priority order: cython > cppyy > numpy Returns ------- @@ -58,9 +60,20 @@ def auto_target(): target_dict = { target.class_name: target for target in codegen_targets if target.class_name } + using_fallback = False + + # Priority: cython > cppyy > numpy if "cython" in target_dict and target_dict["cython"].is_available(): _auto_target = target_dict["cython"] + elif "cppyy" in target_dict and target_dict["cppyy"].is_available(): + _auto_target = target_dict["cppyy"] + logger.info( + "Using cppyy for code generation. cppyy provides JIT " + "compilation without requiring an external C++ compiler.", + "codegen_cppyy", + once=True, + ) else: _auto_target = target_dict["numpy"] using_fallback = True @@ -77,12 +90,20 @@ def auto_target(): ) else: logger.debug( - "Chosing %r as the code generation target." % _auto_target.class_name + "Choosing %r as the code generation target." % _auto_target.class_name ) return _auto_target +def reset_auto_target(): + """ + Reset the cached auto target. Used for testing. + """ + global _auto_target + _auto_target = None + + class Device: """ Base Device object. 
@@ -268,16 +289,29 @@ def code_object_class(self, codeobj_class=None, fallback_pref="codegen.target"): if isinstance(codeobj_class, str): if codeobj_class == "auto": return auto_target() + + # Look up the target by name for target in codegen_targets: if target.class_name == codeobj_class: + # Check if the target is available + if ( + hasattr(target, "is_available") + and not target.is_available() + ): + raise ValueError( + f"Code generation target '{codeobj_class}' is not " + f"available. Please ensure the required dependencies " + f"are installed." + ) return target - # No target found - targets = ["auto"] + [ + + # No target found - provide helpful error message + available_targets = ["auto"] + [ target.class_name for target in codegen_targets if target.class_name ] raise ValueError( - f"Unknown code generation target: {codeobj_class}, should be one" - f" of {targets}" + f"Unknown code generation target: '{codeobj_class}'. " + f"Should be one of {available_targets}" ) else: return codeobj_class From a16280b8156dd8b12d7856bce0be39b25ca360b9 Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 00:56:31 +0530 Subject: [PATCH 02/29] remove: unneeded files --- .../runtime/cppyy_rt/extension_manager.py | 963 ------------------ 1 file changed, 963 deletions(-) delete mode 100644 brian2/codegen/runtime/cppyy_rt/extension_manager.py diff --git a/brian2/codegen/runtime/cppyy_rt/extension_manager.py b/brian2/codegen/runtime/cppyy_rt/extension_manager.py deleted file mode 100644 index c3b59da7a..000000000 --- a/brian2/codegen/runtime/cppyy_rt/extension_manager.py +++ /dev/null @@ -1,963 +0,0 @@ -""" -Extension manager for cppyy runtime backend. - -This module provides caching, lifecycle management, and utility functions -for the cppyy-based code generation backend. Unlike the Cython extension -manager which deals with filesystem-based compilation, this manager handles -in-memory compilation and function caching. 
- -Key responsibilities: -1. Function cache management (in-memory, content-addressed) -2. Compilation lock management (thread safety) -3. Infrastructure initialization (one-time setup) -4. Diagnostics and statistics - -""" - -from __future__ import annotations - -import hashlib -import threading -import time -from collections import OrderedDict -from dataclasses import dataclass, field -from typing import Any - -from brian2.utils.logger import get_logger - -__all__ = [ - "CppyyExtensionManager", - "CppyyFunctionCache", - "CppyyInfrastructure", - "get_extension_manager", -] - -logger = get_logger(__name__) - - -# ============================================================================= -# Statistics Tracking -# ============================================================================= - - -@dataclass -class CompilationStats: - """ - Statistics for cppyy compilation performance. - - Attributes: - total_compilations: Total number of compilations performed. - cache_hits: Number of times a cached function was reused. - cache_misses: Number of times compilation was required. - total_compile_time: Cumulative time spent compiling (seconds). - total_code_size: Cumulative size of compiled code (bytes). - evictions: Number of functions evicted from cache. 
- """ - - total_compilations: int = 0 - cache_hits: int = 0 - cache_misses: int = 0 - total_compile_time: float = 0.0 - total_code_size: int = 0 - evictions: int = 0 - errors: int = 0 - - def record_compilation(self, compile_time: float, code_size: int) -> None: - """Record a successful compilation.""" - self.total_compilations += 1 - self.cache_misses += 1 - self.total_compile_time += compile_time - self.total_code_size += code_size - - def record_cache_hit(self) -> None: - """Record a cache hit.""" - self.cache_hits += 1 - - def record_eviction(self) -> None: - """Record a cache eviction.""" - self.evictions += 1 - - def record_error(self) -> None: - """Record a compilation error.""" - self.errors += 1 - - @property - def hit_rate(self) -> float: - """Calculate cache hit rate.""" - total = self.cache_hits + self.cache_misses - return self.cache_hits / total if total > 0 else 0.0 - - @property - def average_compile_time(self) -> float: - """Calculate average compilation time.""" - if self.total_compilations == 0: - return 0.0 - return self.total_compile_time / self.total_compilations - - def __str__(self) -> str: - return ( - f"CompilationStats(\n" - f" compilations={self.total_compilations},\n" - f" cache_hits={self.cache_hits},\n" - f" cache_misses={self.cache_misses},\n" - f" hit_rate={self.hit_rate:.1%},\n" - f" avg_compile_time={self.average_compile_time * 1000:.1f}ms,\n" - f" total_code_size={self.total_code_size / 1024:.1f}KB,\n" - f" evictions={self.evictions},\n" - f" errors={self.errors}\n" - f")" - ) - - -# ============================================================================= -# Function Cache -# ============================================================================= - - -@dataclass -class CachedFunction: - """ - A cached compiled function with metadata. - - Attributes: - func: The compiled cppyy function proxy. - code_hash: SHA256 hash of the source code. - function_name: Name of the C++ function. 
- created_at: Timestamp when the function was compiled. - last_used: Timestamp when the function was last called. - use_count: Number of times the function has been called. - code_size: Size of the source code in bytes. - """ - - func: Any - code_hash: str - function_name: str - created_at: float = field(default_factory=time.time) - last_used: float = field(default_factory=time.time) - use_count: int = 0 - code_size: int = 0 - - def touch(self) -> None: - """Update last_used timestamp and increment use count.""" - self.last_used = time.time() - self.use_count += 1 - - -class CppyyFunctionCache: - """ - Thread-safe LRU cache for compiled cppyy functions. - - This cache stores compiled C++ functions keyed by a hash of their source - code. It uses an LRU (Least Recently Used) eviction policy when the cache - exceeds its maximum size. - - The cache is designed to be shared across all CppyyCodeObject instances - within a process to maximize code reuse. - - Thread Safety: - All public methods are thread-safe and protected by a reentrant lock. - - Attributes: - max_size: Maximum number of functions to cache. - _cache: OrderedDict mapping code hashes to CachedFunction objects. - _lock: Threading lock for thread-safe access. - _stats: Compilation statistics tracker. - """ - - def __init__(self, max_size: int = 1000) -> None: - """ - Initialize the function cache. - - Args: - max_size: Maximum number of functions to cache. When exceeded, - least recently used functions are evicted. 
- """ - self._max_size = max_size - self._cache: OrderedDict[str, CachedFunction] = OrderedDict() - self._lock = threading.RLock() - self._stats = CompilationStats() - - # Reverse lookup: function name -> code hash - self._name_to_hash: dict[str, str] = {} - - @property - def max_size(self) -> int: - """Get the maximum cache size.""" - return self._max_size - - @max_size.setter - def max_size(self, value: int) -> None: - """Set the maximum cache size, evicting if necessary.""" - with self._lock: - self._max_size = value - self._evict_if_needed() - - def get(self, code_hash: str) -> Any | None: - """ - Get a cached function by its code hash. - - Args: - code_hash: SHA256 hash of the C++ source code. - - Returns: - The compiled function proxy, or None if not found. - """ - with self._lock: - if code_hash in self._cache: - cached = self._cache[code_hash] - cached.touch() - # Move to end (most recently used) - self._cache.move_to_end(code_hash) - self._stats.record_cache_hit() - return cached.func - return None - - def put( - self, - code_hash: str, - func: Any, - function_name: str, - code_size: int = 0, - ) -> None: - """ - Store a compiled function in the cache. - - Args: - code_hash: SHA256 hash of the C++ source code. - func: The compiled cppyy function proxy. - function_name: Name of the C++ function. - code_size: Size of the source code in bytes. 
- """ - with self._lock: - # Remove existing entry if present - if code_hash in self._cache: - old_entry = self._cache.pop(code_hash) - if old_entry.function_name in self._name_to_hash: - del self._name_to_hash[old_entry.function_name] - - # Evict if necessary - self._evict_if_needed() - - # Add new entry - cached = CachedFunction( - func=func, - code_hash=code_hash, - function_name=function_name, - code_size=code_size, - ) - self._cache[code_hash] = cached - self._name_to_hash[function_name] = code_hash - - def _evict_if_needed(self) -> None: - """Evict least recently used entries if cache is over capacity.""" - while len(self._cache) >= self._max_size: - # Pop the oldest entry (first item in OrderedDict) - code_hash, cached = self._cache.popitem(last=False) - if cached.function_name in self._name_to_hash: - del self._name_to_hash[cached.function_name] - self._stats.record_eviction() - logger.debug(f"Evicted cached function: {cached.function_name}") - - def contains(self, code_hash: str) -> bool: - """Check if a code hash is in the cache.""" - with self._lock: - return code_hash in self._cache - - def get_by_name(self, function_name: str) -> Any | None: - """ - Get a cached function by its function name. - - Args: - function_name: Name of the C++ function. - - Returns: - The compiled function proxy, or None if not found. - """ - with self._lock: - code_hash = self._name_to_hash.get(function_name) - if code_hash is not None: - return self.get(code_hash) - return None - - def clear(self) -> None: - """Clear all cached functions.""" - with self._lock: - self._cache.clear() - self._name_to_hash.clear() - logger.debug("Function cache cleared") - - def __len__(self) -> int: - """Return the number of cached functions.""" - with self._lock: - return len(self._cache) - - @property - def stats(self) -> CompilationStats: - """Get compilation statistics.""" - return self._stats - - def get_info(self) -> dict[str, Any]: - """ - Get detailed information about the cache state. 
- - Returns: - Dictionary with cache information. - """ - with self._lock: - entries = [] - for code_hash, cached in self._cache.items(): - entries.append( - { - "function_name": cached.function_name, - "code_hash": code_hash[:16] + "...", - "use_count": cached.use_count, - "code_size": cached.code_size, - "age_seconds": time.time() - cached.created_at, - } - ) - - return { - "size": len(self._cache), - "max_size": self._max_size, - "stats": str(self._stats), - "entries": entries, - } - - -# ============================================================================= -# Infrastructure Manager -# ============================================================================= - - -class CppyyInfrastructure: - """ - Singleton managing cppyy initialization and shared C++ infrastructure. - - This class handles one-time loading of: - - Standard C++ headers - - Brian2 common macros and type definitions - - Random number generator infrastructure - - DynamicArray and SpikeQueue implementations - - The infrastructure is initialized lazily on first use and shared - across all CppyyCodeObject instances. - - Thread Safety: - Initialization is protected by a lock to prevent race conditions. - """ - - _instance: CppyyInfrastructure | None = None - _initialized: bool = False - _lock = threading.Lock() - - def __new__(cls) -> CppyyInfrastructure: - """Ensure singleton pattern.""" - if cls._instance is None: - with cls._lock: - if cls._instance is None: - cls._instance = super().__new__(cls) - return cls._instance - - def __init__(self) -> None: - """Initialize the infrastructure (only runs once).""" - pass # Actual init is in ensure_initialized() - - def ensure_initialized(self) -> None: - """ - Ensure the cppyy infrastructure is initialized. - - This method is idempotent and thread-safe. It only performs - initialization once, even if called multiple times. 
- """ - if CppyyInfrastructure._initialized: - return - - with CppyyInfrastructure._lock: - if CppyyInfrastructure._initialized: - return - - self._load_infrastructure() - CppyyInfrastructure._initialized = True - - def _load_infrastructure(self) -> None: - """Load all required C++ infrastructure into cppyy.""" - import cppyy - - logger.debug("Initializing cppyy infrastructure...") - start_time = time.time() - - # Load standard headers - cppyy.include("") - cppyy.include("") - cppyy.include("") - cppyy.include("") - cppyy.include("") - cppyy.include("") - cppyy.include("") - - # Define common infrastructure - cppyy.cppdef(self._get_common_definitions()) - cppyy.cppdef(self._get_dynamic_array_definitions()) - cppyy.cppdef(self._get_spike_queue_definitions()) - cppyy.cppdef(self._get_random_definitions()) - - elapsed = time.time() - start_time - logger.debug(f"cppyy infrastructure initialized in {elapsed * 1000:.1f}ms") - - def _get_common_definitions(self) -> str: - """Get common C++ definitions.""" - return """ -#ifndef BRIAN2_CPPYY_COMMON -#define BRIAN2_CPPYY_COMMON - -#include -#include -#include - -namespace brian2_cppyy { - -// Integer types -using std::int8_t; -using std::int16_t; -using std::int32_t; -using std::int64_t; -using std::uint8_t; -using std::uint16_t; -using std::uint32_t; -using std::uint64_t; -using std::size_t; - -// Clip function -template -inline T _clip(T value, T min_val, T max_val) { - return std::min(std::max(value, min_val), max_val); -} - -// Integer division (floor division matching Python) -inline int64_t _floordiv(int64_t a, int64_t b) { - int64_t q = a / b; - int64_t r = a % b; - if ((r != 0) && ((r < 0) != (b < 0))) { - q -= 1; - } - return q; -} - -// Modulo matching Python semantics -inline int64_t _mod(int64_t a, int64_t b) { - int64_t r = a % b; - if ((r != 0) && ((r < 0) != (b < 0))) { - r += b; - } - return r; -} - -// Sign function -template -inline int _sign(T val) { - return (T(0) < val) - (val < T(0)); -} - -// Boolean 
conversion -inline int _bool_to_int(bool b) { - return b ? 1 : 0; -} - -} // namespace brian2_cppyy - -#endif // BRIAN2_CPPYY_COMMON -""" - - def _get_dynamic_array_definitions(self) -> str: - """Get DynamicArray C++ definitions.""" - return """ -#ifndef BRIAN2_CPPYY_DYNAMIC_ARRAY -#define BRIAN2_CPPYY_DYNAMIC_ARRAY - -#include -#include -#include - -namespace brian2_cppyy { - -template -class DynamicArray1D { -private: - std::vector _data; - -public: - DynamicArray1D() = default; - explicit DynamicArray1D(size_t size) : _data(size) {} - DynamicArray1D(size_t size, T value) : _data(size, value) {} - - // Data access - T* data() noexcept { return _data.data(); } - const T* data() const noexcept { return _data.data(); } - - // Size operations - size_t size() const noexcept { return _data.size(); } - bool empty() const noexcept { return _data.empty(); } - void resize(size_t new_size) { _data.resize(new_size); } - void resize(size_t new_size, T value) { _data.resize(new_size, value); } - void reserve(size_t capacity) { _data.reserve(capacity); } - void clear() { _data.clear(); } - - // Element access - T& operator[](size_t idx) { return _data[idx]; } - const T& operator[](size_t idx) const { return _data[idx]; } - T& at(size_t idx) { return _data.at(idx); } - const T& at(size_t idx) const { return _data.at(idx); } - - // Modification - void push_back(const T& value) { _data.push_back(value); } - void push_back(T&& value) { _data.push_back(std::move(value)); } - void pop_back() { _data.pop_back(); } - - // Iterators - typename std::vector::iterator begin() { return _data.begin(); } - typename std::vector::iterator end() { return _data.end(); } - typename std::vector::const_iterator begin() const { return _data.begin(); } - typename std::vector::const_iterator end() const { return _data.end(); } -}; - -template -class DynamicArray2D { -private: - std::vector> _data; - size_t _cols; - -public: - DynamicArray2D() : _cols(0) {} - explicit DynamicArray2D(size_t rows) : 
_data(rows), _cols(0) {} - DynamicArray2D(size_t rows, size_t cols) - : _data(rows, std::vector(cols)), _cols(cols) {} - DynamicArray2D(size_t rows, size_t cols, T value) - : _data(rows, std::vector(cols, value)), _cols(cols) {} - - // Row access - std::vector& operator[](size_t idx) { return _data[idx]; } - const std::vector& operator[](size_t idx) const { return _data[idx]; } - - // Size operations - size_t size() const noexcept { return _data.size(); } - size_t rows() const noexcept { return _data.size(); } - size_t cols() const noexcept { return _cols; } - bool empty() const noexcept { return _data.empty(); } - - void resize(size_t new_rows) { - _data.resize(new_rows); - for (auto& row : _data) { - row.resize(_cols); - } - } - - void resize(size_t new_rows, size_t new_cols) { - _cols = new_cols; - _data.resize(new_rows); - for (auto& row : _data) { - row.resize(new_cols); - } - } - - void clear() { - _data.clear(); - _cols = 0; - } -}; - -} // namespace brian2_cppyy - -#endif // BRIAN2_CPPYY_DYNAMIC_ARRAY -""" - - def _get_spike_queue_definitions(self) -> str: - """Get SpikeQueue C++ definitions.""" - return """ -#ifndef BRIAN2_CPPYY_SPIKE_QUEUE -#define BRIAN2_CPPYY_SPIKE_QUEUE - -#include -#include -#include - -namespace brian2_cppyy { - -class SpikeQueue { -private: - std::vector> _queue; - size_t _current_idx; - size_t _n_delays; - int32_t* _delays; - size_t _n_synapses; - int32_t _source_start; - int32_t _source_end; - -public: - SpikeQueue() - : _current_idx(0) - , _n_delays(1) - , _delays(nullptr) - , _n_synapses(0) - , _source_start(0) - , _source_end(0) - { - _queue.resize(1); - } - - SpikeQueue(int32_t source_start, int32_t source_end) - : _current_idx(0) - , _n_delays(1) - , _delays(nullptr) - , _n_synapses(0) - , _source_start(source_start) - , _source_end(source_end) - { - _queue.resize(1); - } - - void prepare(int32_t* delays, size_t n_delays, size_t n_synapses) { - _delays = delays; - _n_delays = n_delays > 0 ? 
n_delays : 1; - _n_synapses = n_synapses; - - _queue.clear(); - _queue.resize(_n_delays); - _current_idx = 0; - } - - void push(int32_t* spike_indices, int n_spikes) { - if (n_spikes == 0) return; - - // Simple implementation: push all spikes to current slot - // Full implementation would use delays - auto& current = _queue[_current_idx % _n_delays]; - for (int i = 0; i < n_spikes; ++i) { - current.push_back(spike_indices[i]); - } - } - - std::vector* peek() { - return &_queue[_current_idx % _n_delays]; - } - - const std::vector* peek() const { - return &_queue[_current_idx % _n_delays]; - } - - void advance() { - _queue[_current_idx % _n_delays].clear(); - _current_idx++; - } - - size_t size() const { - return _queue[_current_idx % _n_delays].size(); - } - - void clear() { - for (auto& slot : _queue) { - slot.clear(); - } - _current_idx = 0; - } -}; - -} // namespace brian2_cppyy - -#endif // BRIAN2_CPPYY_SPIKE_QUEUE -""" - - def _get_random_definitions(self) -> str: - """Get random number generator C++ definitions.""" - return """ -#ifndef BRIAN2_CPPYY_RANDOM -#define BRIAN2_CPPYY_RANDOM - -#include -#include - -namespace brian2_cppyy { - -// Thread-local random engine for each compilation unit -thread_local std::mt19937_64 _rng; -thread_local bool _rng_seeded = false; - -inline void seed_rng(uint64_t seed) { - _rng.seed(seed); - _rng_seeded = true; -} - -inline void ensure_rng_seeded() { - if (!_rng_seeded) { - std::random_device rd; - _rng.seed(rd()); - _rng_seeded = true; - } -} - -inline double _rand() { - ensure_rng_seeded(); - std::uniform_real_distribution dist(0.0, 1.0); - return dist(_rng); -} - -inline double _randn() { - ensure_rng_seeded(); - std::normal_distribution dist(0.0, 1.0); - return dist(_rng); -} - -inline int32_t _poisson(double lambda) { - ensure_rng_seeded(); - if (lambda <= 0) return 0; - std::poisson_distribution dist(lambda); - return dist(_rng); -} - -inline double _rand_uniform(double low, double high) { - ensure_rng_seeded(); - 
std::uniform_real_distribution dist(low, high); - return dist(_rng); -} - -inline int32_t _rand_int(int32_t low, int32_t high) { - ensure_rng_seeded(); - std::uniform_int_distribution dist(low, high - 1); - return dist(_rng); -} - -inline double _rand_exponential(double beta) { - ensure_rng_seeded(); - std::exponential_distribution dist(1.0 / beta); - return dist(_rng); -} - -inline double _rand_gamma(double alpha, double beta) { - ensure_rng_seeded(); - std::gamma_distribution dist(alpha, beta); - return dist(_rng); -} - -} // namespace brian2_cppyy - -#endif // BRIAN2_CPPYY_RANDOM -""" - - @property - def is_initialized(self) -> bool: - """Check if infrastructure is initialized.""" - return CppyyInfrastructure._initialized - - @classmethod - def reset(cls) -> None: - """ - Reset the infrastructure state (mainly for testing). - - Warning: This does not unload the C++ definitions from cppyy, - as that's not possible. It only resets the initialization flag. - """ - with cls._lock: - cls._initialized = False - cls._instance = None - - -# ============================================================================= -# Extension Manager -# ============================================================================= - - -class CppyyExtensionManager: - """ - Central manager for the cppyy runtime backend. - - This class coordinates: - - Function caching - - Infrastructure initialization - - Compilation with thread safety - - Statistics and diagnostics - - It provides a high-level interface for CppyyCodeObject to use - for compiling and caching C++ code. 
- """ - - _instance: CppyyExtensionManager | None = None - _lock = threading.Lock() - - def __new__(cls) -> CppyyExtensionManager: - """Ensure singleton pattern.""" - if cls._instance is None: - with cls._lock: - if cls._instance is None: - cls._instance = super().__new__(cls) - cls._instance._initialized = False - return cls._instance - - def __init__(self) -> None: - """Initialize the extension manager.""" - if self._initialized: - return - - from brian2.core.preferences import prefs - - # Get cache size from preferences (with fallback) - try: - cache_size = prefs["codegen.runtime.cppyy.cache_size"] - except KeyError: - cache_size = 1000 - - self._cache = CppyyFunctionCache(max_size=cache_size) - self._infrastructure = CppyyInfrastructure() - self._compile_lock = threading.Lock() - self._initialized = True - - @property - def cache(self) -> CppyyFunctionCache: - """Get the function cache.""" - return self._cache - - @property - def infrastructure(self) -> CppyyInfrastructure: - """Get the infrastructure manager.""" - return self._infrastructure - - @property - def stats(self) -> CompilationStats: - """Get compilation statistics.""" - return self._cache.stats - - def ensure_initialized(self) -> None: - """Ensure infrastructure is initialized.""" - self._infrastructure.ensure_initialized() - - def compile( - self, - code: str, - function_name: str, - force: bool = False, - ) -> Any: - """ - Compile C++ code and return the function proxy. - - This method handles: - 1. Computing code hash - 2. Checking cache - 3. Thread-safe compilation - 4. Caching the result - - Args: - code: C++ source code to compile. - function_name: Name of the function in the code. - force: If True, recompile even if cached. - - Returns: - The compiled cppyy function proxy. - - Raises: - RuntimeError: If compilation fails. 
- """ - import cppyy - - # Ensure infrastructure is ready - self.ensure_initialized() - - # Compute hash - code_hash = hashlib.sha256(code.encode()).hexdigest() - - # Check cache (unless forced) - if not force: - cached = self._cache.get(code_hash) - if cached is not None: - logger.debug(f"Cache hit for {function_name}") - return cached - - # Compile with lock - with self._compile_lock: - # Double-check cache (another thread may have compiled) - if not force: - cached = self._cache.get(code_hash) - if cached is not None: - return cached - - logger.debug(f"Compiling {function_name}...") - start_time = time.time() - - try: - cppyy.cppdef(code) - func = getattr(cppyy.gbl, function_name) - except Exception as e: - self._cache.stats.record_error() - raise RuntimeError( - f"Failed to compile {function_name}: {e}\n" - f"Code:\n{self._format_code(code)}" - ) from e - - compile_time = time.time() - start_time - code_size = len(code.encode()) - - # Update stats - self._cache.stats.record_compilation(compile_time, code_size) - - # Cache the result - self._cache.put(code_hash, func, function_name, code_size) - - logger.debug( - f"Compiled {function_name} in {compile_time * 1000:.1f}ms " - f"({code_size} bytes)" - ) - - return func - - def _format_code(self, code: str, max_lines: int = 50) -> str: - """Format code with line numbers for error messages.""" - lines = code.split("\n") - if len(lines) > max_lines: - # Show first and last portions - half = max_lines // 2 - formatted_lines = [] - for i, line in enumerate(lines[:half], 1): - formatted_lines.append(f"{i:4d} | {line}") - formatted_lines.append( - f" | ... ({len(lines) - max_lines} lines omitted) ..." 
- ) - for i, line in enumerate(lines[-half:], len(lines) - half + 1): - formatted_lines.append(f"{i:4d} | {line}") - return "\n".join(formatted_lines) - else: - return "\n".join(f"{i:4d} | {line}" for i, line in enumerate(lines, 1)) - - def clear_cache(self) -> None: - """Clear the function cache.""" - self._cache.clear() - - def get_diagnostics(self) -> dict[str, Any]: - """ - Get diagnostic information about the extension manager. - - Returns: - Dictionary with diagnostic information. - """ - return { - "infrastructure_initialized": self._infrastructure.is_initialized, - "cache_info": self._cache.get_info(), - "stats": str(self._cache.stats), - } - - -# ============================================================================= -# Module-level Access -# ============================================================================= - -# Global extension manager instance -_extension_manager: CppyyExtensionManager | None = None - - -def get_extension_manager() -> CppyyExtensionManager: - """ - Get the global extension manager instance. - - Returns: - The singleton CppyyExtensionManager instance. 
- """ - global _extension_manager - if _extension_manager is None: - _extension_manager = CppyyExtensionManager() - return _extension_manager From d3908757b230a4a2555687b1c074ad842ee47454 Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 00:58:22 +0530 Subject: [PATCH 03/29] fix: template --- brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp b/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp index 6fe24a5eb..2ac144e48 100644 --- a/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp +++ b/brian2/codegen/runtime/cppyy_rt/templates/common_group.cpp @@ -7,9 +7,7 @@ {% endmacro %} -{# ══════════════════════════════════════════════════════════════════════ #} {# BLOCK: before_run — runs once before simulation starts #} -{# ══════════════════════════════════════════════════════════════════════ #} {% macro before_run() %} {% set _func_name = "_brian_cppyy_before_run_" + _safe_name %} @@ -26,9 +24,7 @@ extern "C" void {{ _func_name }}({{ param_list() }}) { {% endmacro %} -{# ══════════════════════════════════════════════════════════════════════ #} {# BLOCK: run — the main simulation step, runs every timestep #} -{# ══════════════════════════════════════════════════════════════════════ #} {% macro run() %} {% set _func_name = "_brian_cppyy_run_" + _safe_name %} @@ -48,9 +44,7 @@ extern "C" void {{ _func_name }}({{ param_list() }}) { {% endmacro %} -{# ══════════════════════════════════════════════════════════════════════ #} {# BLOCK: after_run — runs once after simulation completes #} -{# ══════════════════════════════════════════════════════════════════════ #} {% macro after_run() %} {% set _func_name = "_brian_cppyy_after_run_" + _safe_name %} From 9431c44cf37bcaca157ce44b8bf36ff1888810ed Mon Sep 17 00:00:00 2001 From: Legend101Zz 
<96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 15:18:45 +0530 Subject: [PATCH 04/29] feat: add cppyy introspector and dynamic array fix --- brian2/codegen/generators/cppyy_generator.py | 22 +- brian2/codegen/runtime/cppyy_rt/cppyy_rt.py | 147 +++- .../codegen/runtime/cppyy_rt/introspector.py | 775 ++++++++++++++++++ .../cppyy_rt/templates/spikemonitor.cpp | 73 ++ .../cppyy_rt/templates/statemonitor.cpp | 63 +- 5 files changed, 1028 insertions(+), 52 deletions(-) create mode 100644 brian2/codegen/runtime/cppyy_rt/introspector.py create mode 100644 brian2/codegen/runtime/cppyy_rt/templates/spikemonitor.cpp diff --git a/brian2/codegen/generators/cppyy_generator.py b/brian2/codegen/generators/cppyy_generator.py index b5d66af78..4b110d5e5 100644 --- a/brian2/codegen/generators/cppyy_generator.py +++ b/brian2/codegen/generators/cppyy_generator.py @@ -86,9 +86,6 @@ def determine_keywords(self) -> dict[str, Any]: _build_param_mapping does the same, so parameter order is guaranteed to match between the signature and the call site. """ - from brian2.devices.device import get_device - - device: Any = get_device() support_code_parts: list[str] = [] hash_define_parts: list[str] = [] @@ -130,23 +127,34 @@ def determine_keywords(self) -> dict[str, Any]: # --- Array variables: pointer + size parameters --- if isinstance(var, ArrayVariable): - pointer_name: str = self.get_array_name(var) + pointer_name = self.get_array_name(var) if pointer_name in handled_pointers: continue handled_pointers.add(pointer_name) - # Skip multidimensional dynamic arrays (need special handling) if getattr(var, "ndim", 1) > 1: + # 2D dynamic arrays: pass the capsule instead of a data pointer, + # because monitors need to resize them. The C++ code extracts + # the DynamicArray2D* from the capsule and calls methods on it. 
+ if isinstance(var, DynamicArrayVariable): + dyn_name = self.get_array_name(var, access_data=False) + capsule_key = f"{dyn_name}_capsule" + function_params.append(("PyObject*", capsule_key, capsule_key)) continue c_type = _cppyy_c_data_type(var.dtype) - namespace_key: str = device.get_array_name(var) - + namespace_key = self.get_array_name(var) function_params.append((f"{c_type}*", pointer_name, namespace_key)) if not var.scalar: function_params.append(("int", f"_num{varname}", f"_num{varname}")) + # For 1D dynamic arrays, ALSO pass the capsule so monitors can resize + if isinstance(var, DynamicArrayVariable): + dyn_name = self.get_array_name(var, access_data=False) + capsule_key = f"{dyn_name}_capsule" + function_params.append(("PyObject*", capsule_key, capsule_key)) + # Optional denormals flushing (gcc/clang x86) denormals_code: str = "" if self.flush_denormals: diff --git a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py index a29907225..65eace597 100644 --- a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py +++ b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py @@ -16,6 +16,7 @@ from __future__ import annotations import importlib.util +import os from collections.abc import Callable from typing import Any @@ -60,6 +61,24 @@ default=[], docs="Extra flags passed to cppyy/Cling, e.g. ['-O2', '-ffast-math'].", ), + enable_introspection=BrianPreference( + default=False, + docs=""" + Enable runtime introspection of compiled C++ code. + + When True, all compiled code objects register with a global introspector + that allows viewing generated C++ source, parameter mappings, namespace + contents, and even replacing functions at runtime. + + Adds minor overhead (stores source strings, maintains registry), so + leave disabled for production runs. 
+ + Usage: + prefs.codegen.runtime.cppyy.enable_introspection = True + from brian2.codegen.runtime.cppyy_rt.introspector import get_introspector + intro = get_introspector() + """, + ), ) # --- Lazy cppyy import --- @@ -103,7 +122,7 @@ def _ensure_support_code() -> None: """ Define universal C++ helpers exactly once in cppyy's interpreter. - Covers: standard headers, Brian2's _brian_mod/_brian_pow/etc., int_(), + Covers: The DynamicArray header from brianlib, standard headers, Brian2's _brian_mod/_brian_pow/etc., int_(), and the shared MT19937 RNG engine. Guarded so repeated calls are no-ops. """ global _support_code_initialized @@ -111,6 +130,22 @@ def _ensure_support_code() -> None: return cppyy = _get_cppyy() + + # Add brianlib include path and load dynamic_array.h ── + # This makes DynamicArray1D and DynamicArray2D available to + # all subsequently compiled cppyy code. These are the SAME classes + # that the Cython DynamicArray wrappers use, so pointers are + # compatible across the two FFI boundaries. + import brian2 + + brianlib_path = os.path.join( + os.path.dirname(brian2.__file__), "devices", "cpp_standalone", "brianlib" + ) + cppyy.add_include_path(brianlib_path) + + # Include the header — Cling compiles it and knows the class layout. + # After this, cppyy C++ code can use DynamicArray1D*, etc. + cppyy.include("dynamic_array.h") from brian2.codegen.generators.cpp_generator import _universal_support_code guarded_code: str = f""" @@ -143,6 +178,24 @@ def _ensure_support_code() -> None: // TODO: hook into Brian's seed() system for reproducibility static std::mt19937 _brian_cppyy_rng; + // ── Helper to extract a C++ pointer from a PyCapsule ── + // This is how we bridge Cython's DynamicArray objects to cppyy: + // Cython wraps the C++ pointer in a PyCapsule, Python passes the + // capsule to our function, and we unwrap it back to a C++ pointer. 
+ #include + + template + DynamicArray1D* _extract_dynamic_array_1d(PyObject* capsule) {{ + void* ptr = PyCapsule_GetPointer(capsule, "DynamicArray1D"); + return static_cast*>(ptr); + }} + + template + DynamicArray2D* _extract_dynamic_array_2d(PyObject* capsule) {{ + void* ptr = PyCapsule_GetPointer(capsule, "DynamicArray2D"); + return static_cast*>(ptr); + }} + #endif // _BRIAN2_CPPYY_SUPPORT_CODE """ cppyy.cppdef(guarded_code) @@ -236,6 +289,10 @@ def variables_to_namespace(self) -> None: """ self.nonconstant_values: list[NonconstantEntry] = [] + # Ensure _owner is available (needed for monitors in fallback path) + if "_owner" not in self.namespace: + self.namespace["_owner"] = self.owner + for name, var in self.variables.items(): if isinstance(var, Function): self._insert_func_namespace(var) @@ -264,16 +321,29 @@ def variables_to_namespace(self) -> None: else: self.namespace[name] = value - # Dynamic arrays: store the container object too + # ── Dynamic arrays: store BOTH the data view AND the capsule ── + # The data view (_ptr_array_*) gives C++ direct pointer access + # to the current data buffer, used in computation functions. + # The capsule (_capsule_*) gives C++ access to the DynamicArray + # C++ object itself, used in monitor functions that need resize. if isinstance(var, DynamicArrayVariable): - dyn_name: str = self.generator_class.get_array_name( + dyn_array_name = self.generator_class.get_array_name( var, access_data=False ) - self.namespace[dyn_name] = self.device.get_value(var, access_data=False) + self.namespace[dyn_array_name] = self.device.get_value( + var, access_data=False + ) + + capsule_name = f"{dyn_array_name}_capsule" + try: + capsule = self.device.get_capsule(var) + self.namespace[capsule_name] = capsule + except (TypeError, AttributeError): + # Not all variables support capsules (e.g. plain arrays) + pass self.namespace[f"_var_{name}"] = var - # Track dynamic arrays that get resized externally (e.g. 
spike monitors) if isinstance(var, DynamicArrayVariable) and var.needs_reference_update: gen_name = self.generator_class.get_array_name(var) self.nonconstant_values.append((gen_name, var.get_value)) @@ -311,6 +381,10 @@ def _build_param_mapping(self) -> list[ParamTuple]: """ Build the (cpp_param_name, namespace_key, c_type) list matching the C++ function signature order. + + This MUST mirror the iteration logic in CppyyCodeGenerator.determine_keywords() + exactly — same sorted order, same filtering, same parameter additions — + otherwise the call-site args won't line up with the compiled signature. """ params: list[ParamTuple] = [] handled_pointers: set[str] = set() @@ -333,6 +407,14 @@ def _build_param_mapping(self) -> list[ParamTuple]: handled_pointers.add(pointer_name) if getattr(var, "ndim", 1) > 1: + # 2D dynamic arrays: pass capsule only (no data pointer). + # Mirrors determine_keywords() which does the same. + if isinstance(var, DynamicArrayVariable): + dyn_name = self.generator_class.get_array_name( + var, access_data=False + ) + capsule_key = f"{dyn_name}_capsule" + params.append((capsule_key, capsule_key, "PyObject*")) continue c_type = _cppyy_c_data_type(var.dtype) @@ -343,6 +425,16 @@ def _build_param_mapping(self) -> list[ParamTuple]: if not var.scalar: params.append((f"_num{varname}", f"_num{varname}", "int")) + # 1D dynamic arrays: ALSO pass the capsule so C++ can resize. + # This mirrors determine_keywords() which appends the capsule + # param right after the pointer + size params. 
+ if isinstance(var, DynamicArrayVariable): + dyn_name = self.generator_class.get_array_name( + var, access_data=False + ) + capsule_key = f"{dyn_name}_capsule" + params.append((capsule_key, capsule_key, "PyObject*")) + return params # --- Compilation --- @@ -361,14 +453,7 @@ def compile_block(self, block: str) -> Any | None: logger.diagnostic(f"cppyy: compiling '{block}' for {self.name}") try: - print(f"\n{'=' * 60}") - print(f"CPPYY COMPILE: {self.name} / block={block}") - print(f"{'=' * 60}") - print(code) - print(f"{'=' * 60}\n") cppyy.cppdef(code) - print("\nCPPYY GLOBAL NAMESPACE:") - print([x for x in dir(cppyy.gbl) if "_brian_" in x]) except Exception as exc: raise BrianObjectException( f"cppyy compilation failed for '{block}' of '{self.name}'.\n" @@ -389,16 +474,10 @@ def compile_block(self, block: str) -> Any | None: self._set_user_func_globals(cppyy) self._param_mappings[block] = self._build_param_mapping() - print(f"\nPARAM MAPPING for {self.name}.{block}:") - for i, (cpp_name, ns_key, ctype) in enumerate(self._param_mappings[block]): - val = self.namespace.get(ns_key, "MISSING") - if hasattr(val, "shape"): - val_desc = f"ndarray shape={val.shape} dtype={val.dtype}" - elif hasattr(val, "get_size"): - val_desc = f"DynamicArray size={val.get_size()}" - else: - val_desc = f"{type(val).__name__} = {val}" - print(f" [{i}] {ctype:20s} {cpp_name:40s} <- ns[{ns_key}] = {val_desc}") + + # register with introspector if enabled + self._register_with_introspector(block, code) + return compiled_func def _set_user_func_globals(self, cppyy: Any) -> None: @@ -434,6 +513,14 @@ def _set_user_func_globals(self, cppyy: Any) -> None: f"'{ns_key}'. May segfault if the function is called." 
) + def _register_with_introspector(self, block: str, source: str) -> None: + """Register this code object with the global introspector, if enabled.""" + from .introspector import CppyyIntrospector + + introspector: CppyyIntrospector | None = CppyyIntrospector.get_instance() + if introspector is not None: + introspector.register(self, block, source) + # --- Execution --- def run_block(self, block: str) -> None: @@ -472,17 +559,13 @@ def run_block(self, block: str) -> None: # bool arrays need int8 view so cppyy's buffer protocol matches if val.dtype == np.bool_: val = val.view(np.int8) + # cppyy can't extract a buffer pointer from empty arrays — + # pass a 1-element dummy instead. The C++ code won't read + # past _num* elements anyway, and for dynamic arrays the + # real access goes through the capsule/DynamicArray object. + if val.size == 0 and c_type.endswith("*"): + val = np.zeros(1, dtype=val.dtype) args.append(val) - # print(f"\nCALLING {self.name}.{block} with {len(args)} args:") - # for i, (cpp_name, _, ctype) in enumerate(param_mapping): - # arg = args[i] - # if isinstance(arg, np.ndarray): - # print( - # f" [{i}] {cpp_name}: ndarray({arg.shape}, {arg.dtype}) " - # f"first={arg.flat[0] if arg.size > 0 else 'empty'}" - # ) - # else: - # print(f" [{i}] {cpp_name}: {type(arg).__name__} = {arg}") compiled_func(*args) except Exception as exc: diff --git a/brian2/codegen/runtime/cppyy_rt/introspector.py b/brian2/codegen/runtime/cppyy_rt/introspector.py new file mode 100644 index 000000000..996ddd357 --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/introspector.py @@ -0,0 +1,775 @@ +""" +Runtime introspection for the cppyy backend. + +Provides a live window into the C++ JIT interpreter during simulation. 
+Enable with: prefs.codegen.runtime.cppyy.enable_introspection = True + +Usage in a Jupyter notebook or script: + + from brian2.codegen.runtime.cppyy_rt.introspector import get_introspector + intro = get_introspector() + + intro.list_objects() # see all compiled code objects + intro.source("neurongroup_stateupdater_*") # view generated C++ + intro.params("neurongroup_stateupdater_*") # view parameter mapping + intro.namespace("neurongroup_stateupdater_*") # view runtime values + + body = intro.get_body("neurongroup_stateupdater_*", "run") + new_body = body.replace("exp(_lio_2)", "1.0 + _lio_2") + intro.replace_body("neurongroup_stateupdater_*", "run", new_body) + + intro.restore("neurongroup_stateupdater_*", "run") # undo +""" + +from __future__ import annotations + +import html +from fnmatch import fnmatch +from typing import Any + +import numpy as np + +from brian2.core.preferences import prefs +from brian2.utils.logger import get_logger + +logger = get_logger(__name__) + +# --- Type aliases --- +ParamTuple = tuple[str, str, str] + + +def get_introspector() -> CppyyIntrospector | None: + """ + Get the global introspector instance, or None if introspection is disabled. + + Returns None (not an error) when the preference is off, so callers can do: + if intro := get_introspector(): + intro.list_objects() + """ + return CppyyIntrospector.get_instance() + + +class CppyyIntrospector: + """ + A live debugging interface into cppyy's JIT-compiled C++ code. + + This is a singleton — all code objects register with the same instance. + Created lazily on first access when introspection is enabled. 
+ """ + + _instance: CppyyIntrospector | None = None + + def __init__(self) -> None: + # All registered code objects, keyed by name + self._objects: dict[str, Any] = {} # name → CppyyCodeObject + + # C++ source for each (codeobj_name, block) pair + self._sources: dict[tuple[str, str], str] = {} + + # Original source and function ref, for restore() + self._original_sources: dict[tuple[str, str], str] = {} + self._original_funcs: dict[tuple[str, str], Any] = {} + + # Version counter for function replacement (can't redefine extern "C") + self._version_counter: dict[tuple[str, str], int] = {} + + # Counter for eval_cpp one-off functions + self._eval_counter: int = 0 + + @classmethod + def get_instance(cls) -> CppyyIntrospector | None: + """Get or create the singleton. Returns None if introspection is disabled.""" + if not prefs.codegen.runtime.cppyy.enable_introspection: + return None + if cls._instance is None: + cls._instance = cls() + return cls._instance + + @classmethod + def reset(cls) -> None: + """Tear down the singleton. Useful between test runs.""" + cls._instance = None + + def register(self, codeobj: Any, block: str, source: str) -> None: + """ + Record a code object and the C++ source it compiled. + + Called automatically by compile_block() when introspection is enabled. + Stores both the code object reference (for live namespace access) and + the source string (for display and function replacement). + """ + name: str = codeobj.name + self._objects[name] = codeobj + self._sources[(name, block)] = source + self._original_sources[(name, block)] = source + self._original_funcs[(name, block)] = codeobj.compiled_code.get(block) + logger.diagnostic(f"introspector: registered {name}.{block}") + + def _resolve_name(self, pattern: str) -> str: + """ + Resolve a name or glob pattern to a single code object name. + + Allows shorthand like "stateupdater*" instead of the full + "neurongroup_stateupdater_codeobject" name. 
+ """ + # Exact match first + if pattern in self._objects: + return pattern + + # Glob match + matches: list[str] = [name for name in self._objects if fnmatch(name, pattern)] + if len(matches) == 1: + return matches[0] + elif len(matches) == 0: + available: str = ", ".join(sorted(self._objects.keys())) + raise KeyError( + f"No code object matching '{pattern}'. Available: {available}" + ) + else: + raise KeyError( + f"Pattern '{pattern}' matches multiple objects: {matches}. " + f"Be more specific." + ) + + def _resolve_names(self, pattern: str) -> list[str]: + """Resolve a pattern to all matching names (for list_objects filtering).""" + if pattern == "*": + return sorted(self._objects.keys()) + return sorted(name for name in self._objects if fnmatch(name, pattern)) + + def list_objects(self, pattern: str = "*") -> ObjectListDisplay: + """ + List all registered code objects, their blocks, and template types. + + Returns a display object that renders as a table in Jupyter or + as formatted text in a terminal. + """ + rows: list[dict[str, str]] = [] + for name in self._resolve_names(pattern): + codeobj = self._objects[name] + blocks: list[str] = [ + block + for block in ("before_run", "run", "after_run") + if (name, block) in self._sources + ] + rows.append( + { + "name": name, + "template": getattr(codeobj, "template_name", "?"), + "blocks": ", ".join(blocks), + "num_vars": str(len(codeobj.variables)), + } + ) + return ObjectListDisplay(rows) + + def source(self, pattern: str, block: str = "run") -> SourceDisplay: + """ + View the C++ source for a code object's block. + + Returns a display object with the source code. In Jupyter, this + renders with basic syntax highlighting. + """ + name: str = self._resolve_name(pattern) + key: tuple[str, str] = (name, block) + if key not in self._sources: + available_blocks: list[str] = [b for n, b in self._sources if n == name] + raise KeyError( + f"No source for {name}.{block}. 
Available blocks: {available_blocks}" + ) + return SourceDisplay(self._sources[key], title=f"{name}.{block}") + + def params(self, pattern: str, block: str = "run") -> ParamsDisplay: + """ + View the parameter mapping for a code object's block. + + Shows how each C++ function parameter maps to a namespace key + and its current runtime value. + """ + name: str = self._resolve_name(pattern) + codeobj = self._objects[name] + mapping: list[ParamTuple] = codeobj._param_mappings.get(block, []) + + rows: list[dict[str, Any]] = [] + for i, (cpp_name, ns_key, c_type) in enumerate(mapping): + val: Any = codeobj.namespace.get(ns_key, "") + rows.append( + { + "index": i, + "c_type": c_type, + "cpp_name": cpp_name, + "ns_key": ns_key, + "value": _describe_value(val), + } + ) + return ParamsDisplay(rows, title=f"{name}.{block}") + + def namespace(self, pattern: str) -> NamespaceDisplay: + """ + View the full namespace dict for a code object, categorized by type. + + Categories: arrays, sizes, constants, variables, dynamic arrays, other. 
+ """ + name: str = self._resolve_name(pattern) + codeobj = self._objects[name] + ns: dict[str, Any] = codeobj.namespace + + categorized: dict[str, list[tuple[str, str]]] = { + "arrays": [], + "sizes": [], + "constants": [], + "variable_objects": [], + "dynamic_arrays": [], + "other": [], + } + + for key in sorted(ns.keys()): + val = ns[key] + desc = _describe_value(val) + + if key.startswith("_ptr_array_"): + categorized["arrays"].append((key, desc)) + elif key.startswith("_num"): + categorized["sizes"].append((key, desc)) + elif key.startswith("_var_"): + categorized["variable_objects"].append((key, desc)) + elif key.startswith("_dynamic_array_"): + categorized["dynamic_arrays"].append((key, desc)) + elif isinstance(val, (int, float, np.integer, np.floating)): + categorized["constants"].append((key, desc)) + else: + categorized["other"].append((key, desc)) + + return NamespaceDisplay(categorized, title=name) + + def inspect(self, pattern: str, block: str = "run") -> InspectDisplay: + """ + Full inspection: source + params + namespace in one view. + In Jupyter, renders as collapsible sections. + """ + name: str = self._resolve_name(pattern) + return InspectDisplay( + source=self.source(name, block), + params=self.params(name, block), + namespace=self.namespace(name), + title=f"{name}.{block}", + ) + + def cpp_globals(self) -> list[str]: + """List all Brian-related symbols in cppyy's global namespace.""" + from .cppyy_rt import _get_cppyy + + cppyy = _get_cppyy() + return sorted(x for x in dir(cppyy.gbl) if "_brian_" in x) + + def get_body(self, pattern: str, block: str = "run") -> str: + """ + Extract just the function body from the compiled source. + + Returns the code between the outer { } of the function definition, + ready for editing. Pass the modified body to replace_body(). 
+ """ + name: str = self._resolve_name(pattern) + source: str = self._sources[(name, block)] + func_name: str = self._get_func_name(name, block) + _, _, body = _extract_function_parts(source, func_name) + return body + + def replace_body(self, pattern: str, block: str, new_body: str) -> str: + """ + Replace a function's body while keeping its signature intact. + + Compiles the new body under a versioned function name (because Cling + can't redefine extern "C" symbols), then swaps the code object's + function reference so the next timestep uses the new version. + + The support code (e.g. _timestep inline) is already in Cling from the + original compilation, so new_body can reference those functions freely. + + Returns the versioned function name for reference. + """ + name: str = self._resolve_name(pattern) + codeobj = self._objects[name] + from .cppyy_rt import _get_cppyy + + cppyy = _get_cppyy() + + # Bump version counter — each replacement gets a unique name + version: int = self._version_counter.get((name, block), 0) + 1 + self._version_counter[(name, block)] = version + + # Build the function signature from the param mapping + mapping: list[ParamTuple] = codeobj._param_mappings[block] + params_str: str = ", ".join( + f"{c_type} {cpp_name}" for cpp_name, _, c_type in mapping + ) + + # Compile under a versioned name + original_func_name: str = self._get_func_name(name, block) + versioned_name: str = f"{original_func_name}_v{version}" + + new_source: str = ( + f'extern "C" void {versioned_name}({params_str}) {{\n{new_body}\n}}\n' + ) + + logger.info( + f"introspector: compiling {versioned_name} (replacing {original_func_name})" + ) + cppyy.cppdef(new_source) + + # Swap the function reference — next run_block() call uses the new one + new_func: Any = getattr(cppyy.gbl, versioned_name) + codeobj.compiled_code[block] = new_func + + # Track the replacement source (with the original function name for display) + display_source: str = new_source.replace(versioned_name, 
original_func_name) + self._sources[(name, block)] = display_source + + return versioned_name + + def replace_source(self, pattern: str, block: str, new_source: str) -> str: + """ + Replace a function with completely new C++ source. + + For advanced users who need to modify support code or add new helpers. + The function name in new_source is automatically versioned to avoid + Cling redefinition errors. + + Warning: if new_source includes support code that's already defined + in Cling (like _timestep), you'll get a redefinition error. Use + inject_cpp() to add new helpers first, then replace_body() to use them. + """ + name: str = self._resolve_name(pattern) + codeobj = self._objects[name] + from .cppyy_rt import _get_cppyy + + cppyy = _get_cppyy() + + version: int = self._version_counter.get((name, block), 0) + 1 + self._version_counter[(name, block)] = version + + original_func_name: str = self._get_func_name(name, block) + versioned_name: str = f"{original_func_name}_v{version}" + + # Replace the function name in the user's source + patched_source: str = new_source.replace(original_func_name, versioned_name) + + cppyy.cppdef(patched_source) + new_func: Any = getattr(cppyy.gbl, versioned_name) + codeobj.compiled_code[block] = new_func + + self._sources[(name, block)] = new_source + self._version_counter[(name, block)] = version + + return versioned_name + + def restore(self, pattern: str, block: str = "run") -> None: + """Restore the original compiled function, undoing any replace_body() calls.""" + name: str = self._resolve_name(pattern) + key: tuple[str, str] = (name, block) + + if key not in self._original_funcs: + raise KeyError(f"No original function stored for {name}.{block}") + + codeobj = self._objects[name] + codeobj.compiled_code[block] = self._original_funcs[key] + self._sources[key] = self._original_sources[key] + self._version_counter.pop(key, None) + + logger.info(f"introspector: restored original {name}.{block}") + + def inject_cpp(self, code: str) 
-> None: + """ + Compile arbitrary C++ code into Cling's interpreter. + + Use this to define helper functions, structs, or globals that your + replacement function bodies can reference. For example: + + intro.inject_cpp(''' + inline double my_custom_activation(double x) { + return x > 0 ? x : 0.01 * x; // leaky relu + } + ''') + """ + from .cppyy_rt import _get_cppyy + + cppyy = _get_cppyy() + cppyy.cppdef(code) + logger.info("introspector: injected custom C++ code") + + def eval_cpp(self, expression: str, result_type: str = "double") -> Any: + """ + Evaluate a C++ expression and return the result to Python. + + Compiles a tiny one-off function, calls it, and returns the value. + Useful for checking constants, testing expressions, or reading globals. + + Examples: + intro.eval_cpp("M_PI") # → 3.14159... + intro.eval_cpp("_brian_mod(7, 3)", "int32_t") # → 1 + intro.eval_cpp("sizeof(double)", "size_t") # → 8 + """ + from .cppyy_rt import _get_cppyy + + cppyy = _get_cppyy() + + func_name: str = f"_brian_eval_{self._eval_counter}" + self._eval_counter += 1 + + cppyy.cppdef( + f"{result_type} {func_name}() {{ return ({result_type})({expression}); }}" + ) + return getattr(cppyy.gbl, func_name)() + + def snapshot(self, pattern: str) -> dict[str, Any]: + """ + Capture a snapshot of a code object's current state. + + Returns a plain dict with source, params, namespace values, and + version info. Useful for before/after comparisons when testing + function replacements. 
+ """ + name: str = self._resolve_name(pattern) + codeobj = self._objects[name] + + array_snapshot: dict[str, Any] = {} + for key, val in codeobj.namespace.items(): + if isinstance(val, np.ndarray): + array_snapshot[key] = { + "shape": val.shape, + "dtype": str(val.dtype), + "min": float(val.min()) if val.size > 0 else None, + "max": float(val.max()) if val.size > 0 else None, + "mean": float(val.mean()) if val.size > 0 else None, + } + + return { + "name": name, + "sources": { + block: src for (n, block), src in self._sources.items() if n == name + }, + "versions": { + block: ver + for (n, block), ver in self._version_counter.items() + if n == name + }, + "arrays": array_snapshot, + } + + ### Internal helpers + + def _get_func_name(self, name: str, block: str) -> str: + """Build the C++ function name matching _make_func_name in codeobject.py.""" + safe: str = name.replace(".", "_").replace("*", "").replace("-", "_") + return f"_brian_cppyy_{block}_{safe}" + + def _repr_html_(self) -> str: + """Display a summary table when the introspector itself is shown in Jupyter.""" + return self.list_objects()._repr_html_() + + +def _describe_value(val: Any) -> str: + """One-line description of a namespace value.""" + if isinstance(val, np.ndarray): + if val.size <= 4: + return f"ndarray({val.shape}, {val.dtype}) = {val.tolist()}" + return ( + f"ndarray({val.shape}, {val.dtype}) " + f"range=[{val.min():.4g}, {val.max():.4g}]" + ) + elif isinstance(val, (int, np.integer)): + return f"int = {val}" + elif isinstance(val, (float, np.floating)): + return f"float = {val:.6g}" + elif hasattr(val, "__class__"): + return f"{val.__class__.__name__}" + else: + return repr(val) + + +def _extract_function_parts(source: str, func_name: str) -> tuple[str, str, str]: + """ + Split C++ source into (preamble, signature, body). + + Finds the function by name, locates the opening brace, then uses + brace-depth counting to find the matching close. 
Works reliably + on our generated code (well-formed, no string literals containing braces). + """ + marker: str = f"void {func_name}" + func_start: int = source.find(marker) + if func_start == -1: + raise ValueError( + f"Could not find function '{func_name}' in source. " + f"Source starts with: {source[:200]}..." + ) + + preamble: str = source[:func_start].rstrip() + + brace_pos: int = source.find("{", func_start) + if brace_pos == -1: + raise ValueError(f"No opening brace found after '{func_name}'") + + signature: str = source[func_start:brace_pos].strip() + + # Match braces to find the function body + depth: int = 0 + for i in range(brace_pos, len(source)): + if source[i] == "{": + depth += 1 + elif source[i] == "}": + depth -= 1 + if depth == 0: + body: str = source[brace_pos + 1 : i] + return preamble, signature, body + + raise ValueError(f"Unmatched braces in function '{func_name}'") + + +# --- CSS used by all display classes --- +_DISPLAY_CSS: str = """ + +""" + + +def _highlight_cpp(source: str) -> str: + """Basic C++ syntax highlighting for HTML display.""" + import re + + # Escape HTML first + s: str = html.escape(source) + + # Comments (// to end of line) + s = re.sub(r"(//.*?)$", r'\1', s, flags=re.MULTILINE) + + # Keywords + keywords = ( + r"\b(extern|void|const|for|if|else|return|static|inline|template|" + r"typename|struct|namespace|typedef|using|auto|break|continue|" + r"while|do|switch|case|default)\b" + ) + s = re.sub(keywords, r'\1', s) + + # Types + types = ( + r"\b(int|int8_t|int32_t|int64_t|size_t|long|double|float|char|" + r"bool|unsigned|void)\b" + ) + s = re.sub(types, r'\1', s) + + # Numbers + s = re.sub( + r"\b(\d+\.?\d*(?:[eE][+-]?\d+)?[fFuUlL]*)\b", + r'\1', + s, + ) + + return s + + +class ObjectListDisplay: + """Display for list_objects() — table of registered code objects.""" + + def __init__(self, rows: list[dict[str, str]]) -> None: + self.rows: list[dict[str, str]] = rows + + def _repr_html_(self) -> str: + header: str = ( + "Code 
ObjectTemplate" + "Compiled Blocks# Variables" + ) + body: str = "" + for row in self.rows: + body += ( + f"{html.escape(row['name'])}" + f"{html.escape(row['template'])}" + f"{html.escape(row['blocks'])}" + f"{html.escape(row['num_vars'])}" + ) + return ( + f'{_DISPLAY_CSS}
' + f"

Compiled Code Objects

" + f"{header}{body}
" + ) + + def __repr__(self) -> str: + lines: list[str] = ["Compiled Code Objects:", ""] + for row in self.rows: + lines.append( + f" {row['name']:<50s} template={row['template']:<20s} " + f"blocks=[{row['blocks']}] vars={row['num_vars']}" + ) + return "\n".join(lines) + + +class SourceDisplay: + """Display for source() — C++ code with highlighting.""" + + def __init__(self, source: str, title: str = "") -> None: + self.source: str = source + self.title: str = title + + def _repr_html_(self) -> str: + highlighted: str = _highlight_cpp(self.source) + return ( + f'{_DISPLAY_CSS}
' + f"

{html.escape(self.title)}

" + f"
{highlighted}
" + ) + + def __repr__(self) -> str: + return f"--- {self.title} ---\n{self.source}" + + def __str__(self) -> str: + return self.source + + +class ParamsDisplay: + """Display for params() — parameter mapping table.""" + + def __init__(self, rows: list[dict[str, Any]], title: str = "") -> None: + self.rows: list[dict[str, Any]] = rows + self.title: str = title + + def _repr_html_(self) -> str: + header: str = ( + "#C++ TypeParameter Name" + "Namespace KeyCurrent Value" + ) + body: str = "" + for row in self.rows: + missing_cls: str = ' class="missing"' if "MISSING" in row["value"] else "" + body += ( + f"{row['index']}" + f"{html.escape(row['c_type'])}" + f"{html.escape(row['cpp_name'])}" + f"{html.escape(row['ns_key'])}" + f"{html.escape(row['value'])}" + ) + return ( + f'{_DISPLAY_CSS}
' + f"

Parameter Mapping: {html.escape(self.title)}

" + f"{header}{body}
" + ) + + def __repr__(self) -> str: + lines: list[str] = [f"Parameter Mapping: {self.title}", ""] + for row in self.rows: + lines.append( + f" [{row['index']:>2d}] {row['c_type']:<12s} " + f"{row['cpp_name']:<44s} <- ns[{row['ns_key']}] = {row['value']}" + ) + return "\n".join(lines) + + +class NamespaceDisplay: + """Display for namespace() — categorized namespace contents.""" + + def __init__( + self, + categorized: dict[str, list[tuple[str, str]]], + title: str = "", + ) -> None: + self.categorized: dict[str, list[tuple[str, str]]] = categorized + self.title: str = title + + # Friendly category labels + _LABELS: dict[str, str] = { + "arrays": "Arrays (data pointers)", + "sizes": "Sizes (_num*)", + "constants": "Constants (scalars)", + "variable_objects": "Variable Objects (_var_*)", + "dynamic_arrays": "Dynamic Arrays", + "other": "Other", + } + + def _repr_html_(self) -> str: + sections: str = "" + for cat, entries in self.categorized.items(): + if not entries: + continue + label: str = self._LABELS.get(cat, cat) + rows: str = "" + for key, desc in entries: + rows += ( + f"{html.escape(key)}" + f"{html.escape(desc)}" + ) + sections += ( + f"
{html.escape(label)} " + f"({len(entries)})" + f"" + f"{rows}
KeyValue
" + ) + return ( + f'{_DISPLAY_CSS}
' + f"

Namespace: {html.escape(self.title)}

" + f"{sections}
" + ) + + def __repr__(self) -> str: + lines: list[str] = [f"Namespace: {self.title}", ""] + for cat, entries in self.categorized.items(): + if not entries: + continue + label: str = self._LABELS.get(cat, cat) + lines.append(f" [{label}]") + for key, desc in entries: + lines.append(f" {key:<50s} {desc}") + lines.append("") + return "\n".join(lines) + + +class InspectDisplay: + """Display for inspect() — combined source + params + namespace.""" + + def __init__( + self, + source: SourceDisplay, + params: ParamsDisplay, + namespace: NamespaceDisplay, + title: str = "", + ) -> None: + self.source: SourceDisplay = source + self.params: ParamsDisplay = params + self.namespace: NamespaceDisplay = namespace + self.title: str = title + + def _repr_html_(self) -> str: + return ( + f'{_DISPLAY_CSS}
' + f"

Inspect: {html.escape(self.title)}

" + f"
C++ Source" + f"
{_highlight_cpp(self.source.source)}
" + f"
Parameter Mapping" + f"{self.params._repr_html_()}
" + f"
Namespace (click to expand)" + f"{self.namespace._repr_html_()}
" + f"
" + ) + + def __repr__(self) -> str: + return ( + f"{'=' * 60}\n" + f"INSPECT: {self.title}\n" + f"{'=' * 60}\n\n" + f"{repr(self.source)}\n\n" + f"{repr(self.params)}\n\n" + f"{repr(self.namespace)}" + ) diff --git a/brian2/codegen/runtime/cppyy_rt/templates/spikemonitor.cpp b/brian2/codegen/runtime/cppyy_rt/templates/spikemonitor.cpp new file mode 100644 index 000000000..98717840a --- /dev/null +++ b/brian2/codegen/runtime/cppyy_rt/templates/spikemonitor.cpp @@ -0,0 +1,73 @@ +{# USES_VARIABLES { N, count, _clock_t, _source_start, _source_stop, _source_N } #} +{# WRITES_TO_READ_ONLY_VARIABLES { N, count } #} +{% extends 'common_group.cpp' %} + +{% block maincode %} + {# Get the spikespace array name #} + {% set _eventspace = get_array_name(eventspace_variable) %} + + int32_t _num_events = {{ _eventspace }}[_num{{ eventspace_variable.name }} - 1]; + + if (_num_events > 0) { + // ── Filter for subgroup range ── + size_t _start_idx = _num_events; + size_t _end_idx = _num_events; + + for (size_t _j = 0; _j < (size_t)_num_events; _j++) { + const int _idx = {{ _eventspace }}[_j]; + if (_idx >= _source_start) { + _start_idx = _j; + break; + } + } + for (size_t _j = _num_events - 1; _j >= _start_idx; _j--) { + const int _idx = {{ _eventspace }}[_j]; + if (_idx < _source_stop) { + break; + } + _end_idx = _j; + } + _num_events = _end_idx - _start_idx; + + if (_num_events > 0) { + // Scalar code + const size_t _vectorisation_idx = 1; + {{ scalar_code | autoindent }} + + size_t _curlen = {{ N }}; + size_t _newlen = _curlen + _num_events; + + // ── Resize all recorded dynamic arrays via capsules ── + {% for varname, var in record_variables | dictsort %} + {% set _dyn_name = get_array_name(var, access_data=False) %} + {% set _capsule_name = _dyn_name + "_capsule" %} + {% set _rec_ctype = c_data_type(var.dtype) %} + { + auto* _dyn_{{ varname }} = _extract_dynamic_array_1d<{{ _rec_ctype }}>({{ _capsule_name }}); + _dyn_{{ varname }}->resize(_newlen); + } + {% endfor %} + + // 
Update N after resize + {{ N }} = _newlen; + + // ── Record each spike ── + for (size_t _j = _start_idx; _j < _end_idx; _j++) { + const size_t _idx = {{ _eventspace }}[_j]; + const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} + + {% for varname, var in record_variables | dictsort %} + {% set _dyn_name = get_array_name(var, access_data=False) %} + {% set _rec_ctype = c_data_type(var.dtype) %} + { + auto* _dyn = _extract_dynamic_array_1d<{{ _rec_ctype }}>({{ _dyn_name }}_capsule); + _dyn->get_data_ptr()[_curlen + _j - _start_idx] = _to_record_{{ varname }}; + } + {% endfor %} + + {{ count }}[_idx - _source_start]++; + } + } + } +{% endblock %} diff --git a/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp b/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp index 4f0874b49..c7a636221 100644 --- a/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp +++ b/brian2/codegen/runtime/cppyy_rt/templates/statemonitor.cpp @@ -1,24 +1,61 @@ -{# State monitor template for cppyy backend #} -{# USES_VARIABLES { _clock_t, _indices, N } #} +{# USES_VARIABLES { t, _clock_t, _indices, N } #} +{# WRITES_TO_READ_ONLY_VARIABLES { t, N } #} {% extends 'common_group.cpp' %} {% block maincode %} - //// MAIN CODE //////////// - const double _current_t = {{ _clock_t }}; - const int _num_indices = _num{{ _indices }}; + // ── Extract DynamicArray objects from capsules ── + // These are the SAME C++ objects that Cython created. The capsule + // holds a void* to the DynamicArray1D that the RuntimeDevice + // allocated. We cast it back to the correct type and can call resize(), + // get_data_ptr(), etc. — all in C++, no Python overhead. 
+ + {% set _t_capsule = "_dynamic_array_" + owner.name + "_t_capsule" %} + auto* _dyn_t = _extract_dynamic_array_1d({{ _t_capsule }}); + + // Get current size and compute new size + size_t _old_len = _dyn_t->size(); + size_t _new_len = _old_len + 1; + + // Resize the time array — this may reallocate the underlying buffer + _dyn_t->resize(_new_len); + + // Write the current clock time into the last element + _dyn_t->get_data_ptr()[_new_len - 1] = {{ _clock_t }}; + + // ── Resize each recorded variable's 2D array ── + {% for varname, var in _recorded_variables | dictsort %} + {% set _rec_capsule = get_array_name(var, access_data=False) + "_capsule" %} + {% set _rec_ctype = c_data_type(var.dtype) %} + { + auto* _dyn_{{ varname }} = _extract_dynamic_array_2d<{{ _rec_ctype }}>({{ _rec_capsule }}); + _dyn_{{ varname }}->resize_along_first(_new_len); + } + {% endfor %} + + // ── Scalar code (runs once) ── + const size_t _vectorisation_idx = -1; + {{ scalar_code | autoindent }} - // Record time - {{ _dynamic_t }}.push_back(_current_t); - // Record state variables for each monitored index for (int _i = 0; _i < _num_indices; _i++) { - const int _idx = {{ _indices }}[_i]; + const size_t _idx = {{ _indices }}[_i]; const size_t _vectorisation_idx = _idx; + {{ vector_code | autoindent }} - {% for varname in record_variables %} - // Record {{ varname }} - {{ vector_code[varname] | autoindent }} - {{ _dynamic_ ~ varname }}.push_back(_to_record_{{ varname }}); + // Write recorded values into the last row of each 2D array. + // After resize, get_data_ptr() returns the (potentially new) buffer, + // and we index using stride * row + col to handle over-allocation. 
+ {% for varname, var in _recorded_variables | dictsort %} + {% set _rec_capsule = get_array_name(var, access_data=False) + "_capsule" %} + {% set _rec_ctype = c_data_type(var.dtype) %} + { + auto* _dyn = _extract_dynamic_array_2d<{{ _rec_ctype }}>({{ _rec_capsule }}); + _dyn->operator()(_new_len - 1, _i) = _to_record_{{ varname }}; + } {% endfor %} } + + // Update N (the number of recorded timesteps) + {{ N }} = _new_len; + {% endblock %} From 69fe7476baaa3dd46eee3c66f512a7af83541c72 Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 16:02:17 +0530 Subject: [PATCH 05/29] add: jupyter notebook for tests --- brian2/codegen/runtime/cppyy_rt/cppyy_rt.py | 53 ++ .../codegen/runtime/cppyy_rt/introspector.py | 508 ++++++----- jupyter-cppyy/test.ipynb | 803 ++++++++++++++++++ 3 files changed, 1136 insertions(+), 228 deletions(-) create mode 100644 jupyter-cppyy/test.ipynb diff --git a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py index 65eace597..bb4a925d2 100644 --- a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py +++ b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py @@ -15,6 +15,7 @@ from __future__ import annotations +import hashlib import importlib.util import os from collections.abc import Callable @@ -85,6 +86,53 @@ _cppyy: Any = None +def _guard_support_code(code: str) -> str: + """ + Wrap per-codeobject support code in #ifndef guards to prevent + Cling redefinition errors. + + When Brian2 calls run() multiple times, it recreates code objects that + generate identical support code (inline functions like _timestep, _rand). + Cling can't redefine symbols, but it dores preserve preprocessor state + across cppyy.cppdef() calls. So we wrap the support code in #ifndef + guards keyed by a content hash — if Cling already compiled this exact + block, the preprocessor skips it. 
+ + The generated code has a predictable structure: + // Per-codeobject support code + [hash defines] + [inline function definitions] # this part gets guarded + // Template-specific support code + extern "C" void _brian_cppyy_...(...) { ... } + + We split at 'extern "C"', guard everything before it, and leave the + function definition (which has a unique name) unguarded. + """ + marker: str = 'extern "C"' + pos: int = code.find(marker) + if pos == -1: + # No function definition found — nothing to guard + return code + + support: str = code[:pos] + func_def: str = code[pos:] + + # Check if there's actual compilable code (not just comments/whitespace) + real_lines: list[str] = [ + line.strip() + for line in support.split("\n") + if line.strip() and not line.strip().startswith("//") + ] + if not real_lines: + # Only comments before extern "C" — no risk of redefinition + return code + + content_hash: str = hashlib.md5("\n".join(real_lines).encode()).hexdigest()[:16] + guard: str = f"_BRIAN_CPPYY_SC_{content_hash}" + + return f"#ifndef {guard}\n#define {guard}\n{support}#endif // {guard}\n\n{func_def}" + + def _get_cppyy() -> Any: """Import cppyy on first use so we don't blow up at import time.""" global _cppyy @@ -451,6 +499,11 @@ def compile_block(self, block: str) -> Any | None: cppyy = _get_cppyy() _ensure_support_code() + # Guard support code against redefinition (happens when run() is + # called multiple times — Brian2 recreates code objects with the + # same inline function definitions) + code = _guard_support_code(code) + logger.diagnostic(f"cppyy: compiling '{block}' for {self.name}") try: cppyy.cppdef(code) diff --git a/brian2/codegen/runtime/cppyy_rt/introspector.py b/brian2/codegen/runtime/cppyy_rt/introspector.py index 996ddd357..52b3de73c 100644 --- a/brian2/codegen/runtime/cppyy_rt/introspector.py +++ b/brian2/codegen/runtime/cppyy_rt/introspector.py @@ -1,29 +1,28 @@ """ Runtime introspection for the cppyy backend. 
-Provides a live window into the C++ JIT interpreter during simulation. Enable with: prefs.codegen.runtime.cppyy.enable_introspection = True -Usage in a Jupyter notebook or script: - +Usage: from brian2.codegen.runtime.cppyy_rt.introspector import get_introspector intro = get_introspector() intro.list_objects() # see all compiled code objects - intro.source("neurongroup_stateupdater_*") # view generated C++ - intro.params("neurongroup_stateupdater_*") # view parameter mapping - intro.namespace("neurongroup_stateupdater_*") # view runtime values + intro.source("*stateupdater*") # view generated C++ + intro.params("*stateupdater*") # view parameter mapping + intro.namespace("*stateupdater*") # view runtime values + intro.inspect("*stateupdater*") # all of the above - body = intro.get_body("neurongroup_stateupdater_*", "run") + body = intro.get_body("*stateupdater*", "run") new_body = body.replace("exp(_lio_2)", "1.0 + _lio_2") - intro.replace_body("neurongroup_stateupdater_*", "run", new_body) - - intro.restore("neurongroup_stateupdater_*", "run") # undo + intro.replace_body("*stateupdater*", "run", new_body) + intro.restore("*stateupdater*", "run") # undo """ from __future__ import annotations import html +import re as _re from fnmatch import fnmatch from typing import Any @@ -34,51 +33,49 @@ logger = get_logger(__name__) -# --- Type aliases --- ParamTuple = tuple[str, str, str] +# --- Optional rich support --- +_RICH_AVAILABLE: bool = False +try: + from rich.console import Console + from rich.panel import Panel + from rich.syntax import Syntax + from rich.table import Table + from rich.text import Text + from rich.tree import Tree -def get_introspector() -> CppyyIntrospector | None: - """ - Get the global introspector instance, or None if introspection is disabled. 
+ _RICH_AVAILABLE = True +except ImportError: + pass - Returns None (not an error) when the preference is off, so callers can do: - if intro := get_introspector(): - intro.list_objects() - """ + +def get_introspector() -> CppyyIntrospector | None: + """Get the global introspector, or None if disabled.""" return CppyyIntrospector.get_instance() class CppyyIntrospector: """ - A live debugging interface into cppyy's JIT-compiled C++ code. + Live debugging interface into cppyy's JIT-compiled C++ code. - This is a singleton — all code objects register with the same instance. - Created lazily on first access when introspection is enabled. + Singleton — all code objects register with the same instance. """ _instance: CppyyIntrospector | None = None def __init__(self) -> None: - # All registered code objects, keyed by name - self._objects: dict[str, Any] = {} # name → CppyyCodeObject - - # C++ source for each (codeobj_name, block) pair + self._objects: dict[str, Any] = {} self._sources: dict[tuple[str, str], str] = {} - - # Original source and function ref, for restore() self._original_sources: dict[tuple[str, str], str] = {} self._original_funcs: dict[tuple[str, str], Any] = {} - - # Version counter for function replacement (can't redefine extern "C") self._version_counter: dict[tuple[str, str], int] = {} - - # Counter for eval_cpp one-off functions self._eval_counter: int = 0 + # Track registration order so we can prefer latest + self._registration_order: list[str] = [] @classmethod def get_instance(cls) -> CppyyIntrospector | None: - """Get or create the singleton. Returns None if introspection is disabled.""" if not prefs.codegen.runtime.cppyy.enable_introspection: return None if cls._instance is None: @@ -87,63 +84,73 @@ def get_instance(cls) -> CppyyIntrospector | None: @classmethod def reset(cls) -> None: - """Tear down the singleton. 
Useful between test runs.""" cls._instance = None - def register(self, codeobj: Any, block: str, source: str) -> None: - """ - Record a code object and the C++ source it compiled. + # === Registration === - Called automatically by compile_block() when introspection is enabled. - Stores both the code object reference (for live namespace access) and - the source string (for display and function replacement). - """ + def register(self, codeobj: Any, block: str, source: str) -> None: name: str = codeobj.name self._objects[name] = codeobj self._sources[(name, block)] = source self._original_sources[(name, block)] = source self._original_funcs[(name, block)] = codeobj.compiled_code.get(block) + if name not in self._registration_order: + self._registration_order.append(name) logger.diagnostic(f"introspector: registered {name}.{block}") + # === Name resolution === + def _resolve_name(self, pattern: str) -> str: """ Resolve a name or glob pattern to a single code object name. - Allows shorthand like "stateupdater*" instead of the full - "neurongroup_stateupdater_codeobject" name. + When multiple objects match (e.g. *stateupdater* matching both + _codeobject and _codeobject_1), we prefer the LATEST registered + match. This is usually what the user wants — after a second run(), + the new code object is the active one. But if the user modified + the original and is calling restore(), the original is still there. + + If disambiguation is needed, uses these heuristics: + 1. If one match lacks a trailing _\d+ suffix and others have one, + prefer the base name (the "original" code object). + 2. Otherwise prefer the most recently registered. 
""" - # Exact match first if pattern in self._objects: return pattern - # Glob match matches: list[str] = [name for name in self._objects if fnmatch(name, pattern)] - if len(matches) == 1: - return matches[0] - elif len(matches) == 0: + + if len(matches) == 0: available: str = ", ".join(sorted(self._objects.keys())) raise KeyError( f"No code object matching '{pattern}'. Available: {available}" ) - else: - raise KeyError( - f"Pattern '{pattern}' matches multiple objects: {matches}. " - f"Be more specific." - ) + + if len(matches) == 1: + return matches[0] + + # Multiple matches — try to pick the most useful one. + # Prefer the base name (without _1, _2 suffix) if it exists. + base_matches: list[str] = [m for m in matches if not _re.search(r"_\d+$", m)] + if len(base_matches) == 1: + return base_matches[0] + + # Fall back to most recently registered + for name in reversed(self._registration_order): + if name in matches: + return name + + return matches[0] def _resolve_names(self, pattern: str) -> list[str]: - """Resolve a pattern to all matching names (for list_objects filtering).""" if pattern == "*": return sorted(self._objects.keys()) return sorted(name for name in self._objects if fnmatch(name, pattern)) - def list_objects(self, pattern: str = "*") -> ObjectListDisplay: - """ - List all registered code objects, their blocks, and template types. + # === Inspection === - Returns a display object that renders as a table in Jupyter or - as formatted text in a terminal. 
- """ + def list_objects(self, pattern: str = "*") -> ObjectListDisplay: + """List all registered code objects, their blocks, and template types.""" rows: list[dict[str, str]] = [] for name in self._resolve_names(pattern): codeobj = self._objects[name] @@ -152,23 +159,20 @@ def list_objects(self, pattern: str = "*") -> ObjectListDisplay: for block in ("before_run", "run", "after_run") if (name, block) in self._sources ] + is_active: bool = name in self._registration_order[-len(self._objects) :] rows.append( { "name": name, "template": getattr(codeobj, "template_name", "?"), "blocks": ", ".join(blocks), "num_vars": str(len(codeobj.variables)), + "active": "●" if is_active else "○", } ) return ObjectListDisplay(rows) def source(self, pattern: str, block: str = "run") -> SourceDisplay: - """ - View the C++ source for a code object's block. - - Returns a display object with the source code. In Jupyter, this - renders with basic syntax highlighting. - """ + """View the C++ source for a code object's block.""" name: str = self._resolve_name(pattern) key: tuple[str, str] = (name, block) if key not in self._sources: @@ -179,12 +183,7 @@ def source(self, pattern: str, block: str = "run") -> SourceDisplay: return SourceDisplay(self._sources[key], title=f"{name}.{block}") def params(self, pattern: str, block: str = "run") -> ParamsDisplay: - """ - View the parameter mapping for a code object's block. - - Shows how each C++ function parameter maps to a namespace key - and its current runtime value. - """ + """View parameter mapping with current runtime values.""" name: str = self._resolve_name(pattern) codeobj = self._objects[name] mapping: list[ParamTuple] = codeobj._param_mappings.get(block, []) @@ -204,11 +203,7 @@ def params(self, pattern: str, block: str = "run") -> ParamsDisplay: return ParamsDisplay(rows, title=f"{name}.{block}") def namespace(self, pattern: str) -> NamespaceDisplay: - """ - View the full namespace dict for a code object, categorized by type. 
- - Categories: arrays, sizes, constants, variables, dynamic arrays, other. - """ + """View the full namespace dict, categorized by type.""" name: str = self._resolve_name(pattern) codeobj = self._objects[name] ns: dict[str, Any] = codeobj.namespace @@ -242,10 +237,7 @@ def namespace(self, pattern: str) -> NamespaceDisplay: return NamespaceDisplay(categorized, title=name) def inspect(self, pattern: str, block: str = "run") -> InspectDisplay: - """ - Full inspection: source + params + namespace in one view. - In Jupyter, renders as collapsible sections. - """ + """Full inspection: source + params + namespace in one view.""" name: str = self._resolve_name(pattern) return InspectDisplay( source=self.source(name, block), @@ -261,13 +253,10 @@ def cpp_globals(self) -> list[str]: cppyy = _get_cppyy() return sorted(x for x in dir(cppyy.gbl) if "_brian_" in x) - def get_body(self, pattern: str, block: str = "run") -> str: - """ - Extract just the function body from the compiled source. + # === Modification === - Returns the code between the outer { } of the function definition, - ready for editing. Pass the modified body to replace_body(). - """ + def get_body(self, pattern: str, block: str = "run") -> str: + """Extract just the function body, ready for editing.""" name: str = self._resolve_name(pattern) source: str = self._sources[(name, block)] func_name: str = self._get_func_name(name, block) @@ -276,16 +265,11 @@ def get_body(self, pattern: str, block: str = "run") -> str: def replace_body(self, pattern: str, block: str, new_body: str) -> str: """ - Replace a function's body while keeping its signature intact. - - Compiles the new body under a versioned function name (because Cling - can't redefine extern "C" symbols), then swaps the code object's - function reference so the next timestep uses the new version. + Replace a function's body, keeping its signature. - The support code (e.g. 
_timestep inline) is already in Cling from the - original compilation, so new_body can reference those functions freely. - - Returns the versioned function name for reference. + Compiles under a versioned name (_v1, _v2...) since Cling can't + redefine extern "C" symbols. Swaps the code object's function ref. + Returns the versioned function name. """ name: str = self._resolve_name(pattern) codeobj = self._objects[name] @@ -293,17 +277,14 @@ def replace_body(self, pattern: str, block: str, new_body: str) -> str: cppyy = _get_cppyy() - # Bump version counter — each replacement gets a unique name version: int = self._version_counter.get((name, block), 0) + 1 self._version_counter[(name, block)] = version - # Build the function signature from the param mapping mapping: list[ParamTuple] = codeobj._param_mappings[block] params_str: str = ", ".join( f"{c_type} {cpp_name}" for cpp_name, _, c_type in mapping ) - # Compile under a versioned name original_func_name: str = self._get_func_name(name, block) versioned_name: str = f"{original_func_name}_v{version}" @@ -316,28 +297,16 @@ def replace_body(self, pattern: str, block: str, new_body: str) -> str: ) cppyy.cppdef(new_source) - # Swap the function reference — next run_block() call uses the new one new_func: Any = getattr(cppyy.gbl, versioned_name) codeobj.compiled_code[block] = new_func - # Track the replacement source (with the original function name for display) display_source: str = new_source.replace(versioned_name, original_func_name) self._sources[(name, block)] = display_source return versioned_name def replace_source(self, pattern: str, block: str, new_source: str) -> str: - """ - Replace a function with completely new C++ source. - - For advanced users who need to modify support code or add new helpers. - The function name in new_source is automatically versioned to avoid - Cling redefinition errors. 
- - Warning: if new_source includes support code that's already defined - in Cling (like _timestep), you'll get a redefinition error. Use - inject_cpp() to add new helpers first, then replace_body() to use them. - """ + """Replace with completely new C++ source. Function name auto-versioned.""" name: str = self._resolve_name(pattern) codeobj = self._objects[name] from .cppyy_rt import _get_cppyy @@ -350,20 +319,16 @@ def replace_source(self, pattern: str, block: str, new_source: str) -> str: original_func_name: str = self._get_func_name(name, block) versioned_name: str = f"{original_func_name}_v{version}" - # Replace the function name in the user's source patched_source: str = new_source.replace(original_func_name, versioned_name) - cppyy.cppdef(patched_source) new_func: Any = getattr(cppyy.gbl, versioned_name) codeobj.compiled_code[block] = new_func self._sources[(name, block)] = new_source - self._version_counter[(name, block)] = version - return versioned_name def restore(self, pattern: str, block: str = "run") -> None: - """Restore the original compiled function, undoing any replace_body() calls.""" + """Restore the original compiled function.""" name: str = self._resolve_name(pattern) key: tuple[str, str] = (name, block) @@ -378,18 +343,7 @@ def restore(self, pattern: str, block: str = "run") -> None: logger.info(f"introspector: restored original {name}.{block}") def inject_cpp(self, code: str) -> None: - """ - Compile arbitrary C++ code into Cling's interpreter. - - Use this to define helper functions, structs, or globals that your - replacement function bodies can reference. For example: - - intro.inject_cpp(''' - inline double my_custom_activation(double x) { - return x > 0 ? 
x : 0.01 * x; // leaky relu - } - ''') - """ + """Compile arbitrary C++ into Cling (define helpers, structs, etc.).""" from .cppyy_rt import _get_cppyy cppyy = _get_cppyy() @@ -397,37 +351,19 @@ def inject_cpp(self, code: str) -> None: logger.info("introspector: injected custom C++ code") def eval_cpp(self, expression: str, result_type: str = "double") -> Any: - """ - Evaluate a C++ expression and return the result to Python. - - Compiles a tiny one-off function, calls it, and returns the value. - Useful for checking constants, testing expressions, or reading globals. - - Examples: - intro.eval_cpp("M_PI") # → 3.14159... - intro.eval_cpp("_brian_mod(7, 3)", "int32_t") # → 1 - intro.eval_cpp("sizeof(double)", "size_t") # → 8 - """ + """Evaluate a C++ expression and return the result.""" from .cppyy_rt import _get_cppyy cppyy = _get_cppyy() - func_name: str = f"_brian_eval_{self._eval_counter}" self._eval_counter += 1 - cppyy.cppdef( f"{result_type} {func_name}() {{ return ({result_type})({expression}); }}" ) return getattr(cppyy.gbl, func_name)() def snapshot(self, pattern: str) -> dict[str, Any]: - """ - Capture a snapshot of a code object's current state. - - Returns a plain dict with source, params, namespace values, and - version info. Useful for before/after comparisons when testing - function replacements. 
- """ + """Capture current state as a plain dict (for comparisons).""" name: str = self._resolve_name(pattern) codeobj = self._objects[name] @@ -455,20 +391,166 @@ def snapshot(self, pattern: str) -> dict[str, Any]: "arrays": array_snapshot, } - ### Internal helpers + # === Rich CLI display === + + def print_objects(self, pattern: str = "*") -> None: + """Pretty-print all code objects to the terminal.""" + display = self.list_objects(pattern) + if _RICH_AVAILABLE: + _rich_print_objects(display) + else: + print(repr(display)) + + def print_source(self, pattern: str, block: str = "run") -> None: + """Pretty-print C++ source with syntax highlighting.""" + display = self.source(pattern, block) + if _RICH_AVAILABLE: + _rich_print_source(display) + else: + print(repr(display)) + + def print_params(self, pattern: str, block: str = "run") -> None: + """Pretty-print parameter mapping.""" + display = self.params(pattern, block) + if _RICH_AVAILABLE: + _rich_print_params(display) + else: + print(repr(display)) + + def print_namespace(self, pattern: str) -> None: + """Pretty-print namespace contents.""" + display = self.namespace(pattern) + if _RICH_AVAILABLE: + _rich_print_namespace(display) + else: + print(repr(display)) + + def print_inspect(self, pattern: str, block: str = "run") -> None: + """Pretty-print full inspection (source + params + namespace).""" + display = self.inspect(pattern, block) + if _RICH_AVAILABLE: + _rich_print_inspect(display) + else: + print(repr(display)) + + # === Internal === def _get_func_name(self, name: str, block: str) -> str: - """Build the C++ function name matching _make_func_name in codeobject.py.""" safe: str = name.replace(".", "_").replace("*", "").replace("-", "_") return f"_brian_cppyy_{block}_{safe}" def _repr_html_(self) -> str: - """Display a summary table when the introspector itself is shown in Jupyter.""" return self.list_objects()._repr_html_() +# ========================================================================= +# Rich 
CLI renderers (only used when `rich` is installed) +# ========================================================================= + + +def _rich_print_objects(display: ObjectListDisplay) -> None: + console = Console() + table = Table( + title="[bold]Compiled Code Objects[/bold]", + show_header=True, + header_style="bold cyan", + border_style="dim", + ) + table.add_column("", width=1) + table.add_column("Code Object", style="green") + table.add_column("Template", style="yellow") + table.add_column("Blocks") + table.add_column("# Vars", justify="right") + + for row in display.rows: + table.add_row( + row.get("active", "●"), + row["name"], + row["template"], + row["blocks"], + row["num_vars"], + ) + console.print(table) + + +def _rich_print_source(display: SourceDisplay) -> None: + console = Console() + syntax = Syntax( + display.source, "cpp", theme="monokai", line_numbers=True, word_wrap=False + ) + console.print( + Panel(syntax, title=f"[bold]{display.title}[/bold]", border_style="cyan") + ) + + +def _rich_print_params(display: ParamsDisplay) -> None: + console = Console() + table = Table( + title=f"[bold]Parameter Mapping: {display.title}[/bold]", + show_header=True, + header_style="bold cyan", + border_style="dim", + ) + table.add_column("#", justify="right", style="dim", width=4) + table.add_column("C++ Type", style="magenta") + table.add_column("Parameter Name", style="green") + table.add_column("Namespace Key", style="yellow") + table.add_column("Current Value") + + for row in display.rows: + val_style = "red bold" if "MISSING" in row["value"] else "" + table.add_row( + str(row["index"]), + row["c_type"], + row["cpp_name"], + row["ns_key"], + Text(row["value"], style=val_style), + ) + console.print(table) + + +def _rich_print_namespace(display: NamespaceDisplay) -> None: + console = Console() + tree = Tree(f"[bold]Namespace: {display.title}[/bold]") + + labels: dict[str, str] = { + "arrays": "[cyan]Arrays (data pointers)[/cyan]", + "sizes": "[yellow]Sizes 
(_num*)[/yellow]", + "constants": "[green]Constants (scalars)[/green]", + "variable_objects": "[dim]Variable Objects (_var_*)[/dim]", + "dynamic_arrays": "[magenta]Dynamic Arrays[/magenta]", + "other": "[dim]Other[/dim]", + } + + for cat, entries in display.categorized.items(): + if not entries: + continue + branch = tree.add(f"{labels.get(cat, cat)} ({len(entries)})") + for key, desc in entries: + branch.add(f"[bold]{key}[/bold] → {desc}") + + console.print(tree) + + +def _rich_print_inspect(display: InspectDisplay) -> None: + console = Console() + console.print() + console.rule(f"[bold cyan]Inspect: {display.title}[/bold cyan]") + console.print() + + _rich_print_source(display.source) + console.print() + _rich_print_params(display.params) + console.print() + _rich_print_namespace(display.namespace) + + +# ========================================================================= +# Value description helper +# ========================================================================= + + def _describe_value(val: Any) -> str: - """One-line description of a namespace value.""" if isinstance(val, np.ndarray): if val.size <= 4: return f"ndarray({val.shape}, {val.dtype}) = {val.tolist()}" @@ -481,19 +563,13 @@ def _describe_value(val: Any) -> str: elif isinstance(val, (float, np.floating)): return f"float = {val:.6g}" elif hasattr(val, "__class__"): - return f"{val.__class__.__name__}" + return val.__class__.__name__ else: return repr(val) def _extract_function_parts(source: str, func_name: str) -> tuple[str, str, str]: - """ - Split C++ source into (preamble, signature, body). - - Finds the function by name, locates the opening brace, then uses - brace-depth counting to find the matching close. Works reliably - on our generated code (well-formed, no string literals containing braces). 
- """ + """Split C++ source into (preamble, signature, body) by brace matching.""" marker: str = f"void {func_name}" func_start: int = source.find(marker) if func_start == -1: @@ -503,14 +579,12 @@ def _extract_function_parts(source: str, func_name: str) -> tuple[str, str, str] ) preamble: str = source[:func_start].rstrip() - brace_pos: int = source.find("{", func_start) if brace_pos == -1: raise ValueError(f"No opening brace found after '{func_name}'") signature: str = source[func_start:brace_pos].strip() - # Match braces to find the function body depth: int = 0 for i in range(brace_pos, len(source)): if source[i] == "{": @@ -524,7 +598,10 @@ def _extract_function_parts(source: str, func_name: str) -> tuple[str, str, str] raise ValueError(f"Unmatched braces in function '{func_name}'") -# --- CSS used by all display classes --- +# ========================================================================= +# Jupyter HTML display classes +# ========================================================================= + _DISPLAY_CSS: str = """ \n", + "

Compiled Code Objects

Code ObjectTemplateCompiled Blocks# Variables
neurongroup_spike_resetter_codeobjectresetrun5
neurongroup_spike_thresholder_codeobjectthresholdrun8
neurongroup_stateupdater_codeobjectstateupdaterun11
statemonitor_codeobjectstatemonitorrun8
" + ], + "text/plain": [ + "Compiled Code Objects:\n", + "\n", + " ● neurongroup_spike_resetter_codeobject template=reset blocks=[run] vars=5\n", + " ● neurongroup_spike_thresholder_codeobject template=threshold blocks=[run] vars=8\n", + " ● neurongroup_stateupdater_codeobject template=stateupdate blocks=[run] vars=11\n", + " ● statemonitor_codeobject template=statemonitor blocks=[run] vars=8" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# ---- 1. List all compiled code objects ----\n", + "intro.list_objects()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "518d84c6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "

Inspect: neurongroup_stateupdater_codeobject.run

C++ Source
#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
+       "#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
+       "// Per-codeobject support code\n",
+       "\n",
+       "inline int64_t _timestep(double t, double dt) {\n",
+       "    return (int64_t)((t + 1e-3*dt)/dt);\n",
+       "}\n",
+       "\n",
+       "// Template-specific support code (e.g. synaptic queue access)\n",
+       "\n",
+       "#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
+       "\n",
+       "extern "C" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\n",
+       "    \n",
+       "    // scalar code (runs once, outside the loop)\n",
+       "    const size_t _vectorisation_idx = -1;\n",
+       "        \n",
+       "    const double dt = _ptr_array_defaultclock_dt[0];\n",
+       "    const double t = _ptr_array_defaultclock_t[0];\n",
+       "    const int64_t _lio_1 = _timestep(0.005, dt);\n",
+       "    const double _lio_2 = 1.0f*(- dt)/tau;\n",
+       "    const double _lio_3 = exp(_lio_2);\n",
+       "\n",
+       "\n",
+       "    const int _N = N;\n",
+       "\n",
+       "    // vector code (runs per neuron)\n",
+       "    for (int _idx = 0; _idx < _N; _idx++) {\n",
+       "        const size_t _vectorisation_idx = _idx;\n",
+       "                \n",
+       "        const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\n",
+       "        char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\n",
+       "        double v = _ptr_array_neurongroup_v[_idx];\n",
+       "        const double v0 = _ptr_array_neurongroup_v0[_idx];\n",
+       "        not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\n",
+       "        double _v;\n",
+       "        if(!not_refractory)\n",
+       "            _v = (v + v0) - v0;\n",
+       "        else \n",
+       "            _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\n",
+       "        if(not_refractory)\n",
+       "            v = _v;\n",
+       "        _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\n",
+       "        _ptr_array_neurongroup_v[_idx] = v;\n",
+       "\n",
+       "    }\n",
+       "}
Parameter Mapping\n", + "\n", + "

Parameter Mapping: neurongroup_stateupdater_codeobject.run

#C++ TypeParameter NameNamespace KeyCurrent Value
0int64_tNNint = 100
1double*_ptr_array_defaultclock_dt_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
2double*_ptr_array_neurongroup_lastspike_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
3int_numlastspike_numlastspikeint = 100
4int8_t*_ptr_array_neurongroup_not_refractory_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
5int_numnot_refractory_numnot_refractoryint = 100
6double*_ptr_array_defaultclock_t_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
7doubletautaufloat = 0.01
8double*_ptr_array_neurongroup_v_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
9int_numv_numvint = 100
10double*_ptr_array_neurongroup_v0_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
11int_numv0_numv0int = 100
Namespace (click to expand)\n", + "\n", + "

Namespace: neurongroup_stateupdater_codeobject

Arrays (data pointers) (6)
KeyValue
_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
Sizes (_num*) (6)
KeyValue
_numdtint = 1
_numlastspikeint = 100
_numnot_refractoryint = 100
_numtint = 1
_numvint = 100
_numv0int = 100
Constants (scalars) (3)
KeyValue
Nint = 100
dtfloat = 0.0001
taufloat = 0.01
Variable Objects (_var_*) (8)
KeyValue
_var_NConstant
_var_dtArrayVariable
_var_lastspikeArrayVariable
_var_not_refractoryArrayVariable
_var_tArrayVariable
_var_tauConstant
_var_vArrayVariable
_var_v0ArrayVariable
Other (2)
KeyValue
_ownerNeuronGroup
logical_notufunc
" + ], + "text/plain": [ + "============================================================\n", + "INSPECT: neurongroup_stateupdater_codeobject.run\n", + "============================================================\n", + "\n", + "--- neurongroup_stateupdater_codeobject.run ---\n", + "#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", + "#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", + "// Per-codeobject support code\n", + "\n", + "inline int64_t _timestep(double t, double dt) {\n", + " return (int64_t)((t + 1e-3*dt)/dt);\n", + "}\n", + "\n", + "// Template-specific support code (e.g. synaptic queue access)\n", + "\n", + "#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", + "\n", + "extern \"C\" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\n", + " \n", + " // scalar code (runs once, outside the loop)\n", + " const size_t _vectorisation_idx = -1;\n", + " \n", + " const double dt = _ptr_array_defaultclock_dt[0];\n", + " const double t = _ptr_array_defaultclock_t[0];\n", + " const int64_t _lio_1 = _timestep(0.005, dt);\n", + " const double _lio_2 = 1.0f*(- dt)/tau;\n", + " const double _lio_3 = exp(_lio_2);\n", + "\n", + "\n", + " const int _N = N;\n", + "\n", + " // vector code (runs per neuron)\n", + " for (int _idx = 0; _idx < _N; _idx++) {\n", + " const size_t _vectorisation_idx = _idx;\n", + " \n", + " const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\n", + " char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\n", + " double v = _ptr_array_neurongroup_v[_idx];\n", + " const double v0 = _ptr_array_neurongroup_v0[_idx];\n", + " not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\n", + " double _v;\n", + " 
if(!not_refractory)\n", + " _v = (v + v0) - v0;\n", + " else \n", + " _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\n", + " if(not_refractory)\n", + " v = _v;\n", + " _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\n", + " _ptr_array_neurongroup_v[_idx] = v;\n", + "\n", + " }\n", + "}\n", + "\n", + "Parameter Mapping: neurongroup_stateupdater_codeobject.run\n", + "\n", + " [ 0] int64_t N <- ns[N] = int = 100\n", + " [ 1] double* _ptr_array_defaultclock_dt <- ns[_ptr_array_defaultclock_dt] = ndarray((1,), float64) = [0.0001]\n", + " [ 2] double* _ptr_array_neurongroup_lastspike <- ns[_ptr_array_neurongroup_lastspike] = ndarray((100,), float64) range=[-1e+04, -1e+04]\n", + " [ 3] int _numlastspike <- ns[_numlastspike] = int = 100\n", + " [ 4] int8_t* _ptr_array_neurongroup_not_refractory <- ns[_ptr_array_neurongroup_not_refractory] = ndarray((100,), bool) range=[1, 1]\n", + " [ 5] int _numnot_refractory <- ns[_numnot_refractory] = int = 100\n", + " [ 6] double* _ptr_array_defaultclock_t <- ns[_ptr_array_defaultclock_t] = ndarray((1,), float64) = [0.1]\n", + " [ 7] double tau <- ns[tau] = float = 0.01\n", + " [ 8] double* _ptr_array_neurongroup_v <- ns[_ptr_array_neurongroup_v] = ndarray((100,), float64) range=[0, 0.009957]\n", + " [ 9] int _numv <- ns[_numv] = int = 100\n", + " [10] double* _ptr_array_neurongroup_v0 <- ns[_ptr_array_neurongroup_v0] = ndarray((100,), float64) range=[0, 0.02]\n", + " [11] int _numv0 <- ns[_numv0] = int = 100\n", + "\n", + "Namespace: neurongroup_stateupdater_codeobject\n", + "\n", + " [Arrays (data pointers)]\n", + " _ptr_array_defaultclock_dt ndarray((1,), float64) = [0.0001]\n", + " _ptr_array_defaultclock_t ndarray((1,), float64) = [0.1]\n", + " _ptr_array_neurongroup_lastspike ndarray((100,), float64) range=[-1e+04, -1e+04]\n", + " _ptr_array_neurongroup_not_refractory ndarray((100,), bool) range=[1, 1]\n", + " _ptr_array_neurongroup_v ndarray((100,), float64) range=[0, 0.009957]\n", + " _ptr_array_neurongroup_v0 
ndarray((100,), float64) range=[0, 0.02]\n", + "\n", + " [Sizes (_num*)]\n", + " _numdt int = 1\n", + " _numlastspike int = 100\n", + " _numnot_refractory int = 100\n", + " _numt int = 1\n", + " _numv int = 100\n", + " _numv0 int = 100\n", + "\n", + " [Constants (scalars)]\n", + " N int = 100\n", + " dt float = 0.0001\n", + " tau float = 0.01\n", + "\n", + " [Variable Objects (_var_*)]\n", + " _var_N Constant\n", + " _var_dt ArrayVariable\n", + " _var_lastspike ArrayVariable\n", + " _var_not_refractory ArrayVariable\n", + " _var_t ArrayVariable\n", + " _var_tau Constant\n", + " _var_v ArrayVariable\n", + " _var_v0 ArrayVariable\n", + "\n", + " [Other]\n", + " _owner NeuronGroup\n", + " logical_not ufunc" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# ---- 2. Inspect the state updater ----\n", + "# Using glob pattern — \"stateupdater*\" matches the full name\n", + "intro.inspect(\"*stateupdater*\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "94454327", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "

Parameter Mapping: neurongroup_stateupdater_codeobject.run

#C++ TypeParameter NameNamespace KeyCurrent Value
0int64_tNNint = 100
1double*_ptr_array_defaultclock_dt_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
2double*_ptr_array_neurongroup_lastspike_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
3int_numlastspike_numlastspikeint = 100
4int8_t*_ptr_array_neurongroup_not_refractory_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
5int_numnot_refractory_numnot_refractoryint = 100
6double*_ptr_array_defaultclock_t_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
7doubletautaufloat = 0.01
8double*_ptr_array_neurongroup_v_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
9int_numv_numvint = 100
10double*_ptr_array_neurongroup_v0_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
11int_numv0_numv0int = 100
" + ], + "text/plain": [ + "Parameter Mapping: neurongroup_stateupdater_codeobject.run\n", + "\n", + " [ 0] int64_t N <- ns[N] = int = 100\n", + " [ 1] double* _ptr_array_defaultclock_dt <- ns[_ptr_array_defaultclock_dt] = ndarray((1,), float64) = [0.0001]\n", + " [ 2] double* _ptr_array_neurongroup_lastspike <- ns[_ptr_array_neurongroup_lastspike] = ndarray((100,), float64) range=[-1e+04, -1e+04]\n", + " [ 3] int _numlastspike <- ns[_numlastspike] = int = 100\n", + " [ 4] int8_t* _ptr_array_neurongroup_not_refractory <- ns[_ptr_array_neurongroup_not_refractory] = ndarray((100,), bool) range=[1, 1]\n", + " [ 5] int _numnot_refractory <- ns[_numnot_refractory] = int = 100\n", + " [ 6] double* _ptr_array_defaultclock_t <- ns[_ptr_array_defaultclock_t] = ndarray((1,), float64) = [0.1]\n", + " [ 7] double tau <- ns[tau] = float = 0.01\n", + " [ 8] double* _ptr_array_neurongroup_v <- ns[_ptr_array_neurongroup_v] = ndarray((100,), float64) range=[0, 0.009957]\n", + " [ 9] int _numv <- ns[_numv] = int = 100\n", + " [10] double* _ptr_array_neurongroup_v0 <- ns[_ptr_array_neurongroup_v0] = ndarray((100,), float64) range=[0, 0.02]\n", + " [11] int _numv0 <- ns[_numv0] = int = 100" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# ---- 3. View just the params ----\n", + "intro.params(\"*stateupdater*\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "e84b1e06", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "

Namespace: neurongroup_stateupdater_codeobject

Arrays (data pointers) (6)
KeyValue
_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
Sizes (_num*) (6)
KeyValue
_numdtint = 1
_numlastspikeint = 100
_numnot_refractoryint = 100
_numtint = 1
_numvint = 100
_numv0int = 100
Constants (scalars) (3)
KeyValue
Nint = 100
dtfloat = 0.0001
taufloat = 0.01
Variable Objects (_var_*) (8)
KeyValue
_var_NConstant
_var_dtArrayVariable
_var_lastspikeArrayVariable
_var_not_refractoryArrayVariable
_var_tArrayVariable
_var_tauConstant
_var_vArrayVariable
_var_v0ArrayVariable
Other (2)
KeyValue
_ownerNeuronGroup
logical_notufunc
" + ], + "text/plain": [ + "Namespace: neurongroup_stateupdater_codeobject\n", + "\n", + " [Arrays (data pointers)]\n", + " _ptr_array_defaultclock_dt ndarray((1,), float64) = [0.0001]\n", + " _ptr_array_defaultclock_t ndarray((1,), float64) = [0.1]\n", + " _ptr_array_neurongroup_lastspike ndarray((100,), float64) range=[-1e+04, -1e+04]\n", + " _ptr_array_neurongroup_not_refractory ndarray((100,), bool) range=[1, 1]\n", + " _ptr_array_neurongroup_v ndarray((100,), float64) range=[0, 0.009957]\n", + " _ptr_array_neurongroup_v0 ndarray((100,), float64) range=[0, 0.02]\n", + "\n", + " [Sizes (_num*)]\n", + " _numdt int = 1\n", + " _numlastspike int = 100\n", + " _numnot_refractory int = 100\n", + " _numt int = 1\n", + " _numv int = 100\n", + " _numv0 int = 100\n", + "\n", + " [Constants (scalars)]\n", + " N int = 100\n", + " dt float = 0.0001\n", + " tau float = 0.01\n", + "\n", + " [Variable Objects (_var_*)]\n", + " _var_N Constant\n", + " _var_dt ArrayVariable\n", + " _var_lastspike ArrayVariable\n", + " _var_not_refractory ArrayVariable\n", + " _var_t ArrayVariable\n", + " _var_tau Constant\n", + " _var_v ArrayVariable\n", + " _var_v0 ArrayVariable\n", + "\n", + " [Other]\n", + " _owner NeuronGroup\n", + " logical_not ufunc" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# ---- 4. View the namespace ----\n", + "intro.namespace(\"*stateupdater*\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "674aac98", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['_brian_cppyy_rng',\n", + " '_brian_cppyy_run_neurongroup_spike_resetter_codeobject',\n", + " '_brian_cppyy_run_neurongroup_spike_thresholder_codeobject',\n", + " '_brian_cppyy_run_neurongroup_stateupdater_codeobject',\n", + " '_brian_cppyy_run_statemonitor_codeobject']" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# ---- 5. 
View C++ globals ----\n", + "intro.cpp_globals()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "cb14d40f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "============================================================\n", + "EVAL C++\n", + "============================================================\n", + "M_PI = 3.141592653589793\n", + "sizeof(double) = 8\n", + "_brian_mod(7, 3) = 1\n" + ] + } + ], + "source": [ + "# ---- 6. Evaluate a C++ expression ----\n", + "print(\"\\n\" + \"=\" * 60)\n", + "print(\"EVAL C++\")\n", + "print(\"=\" * 60)\n", + "print(f\"M_PI = {intro.eval_cpp('M_PI')}\")\n", + "print(f\"sizeof(double) = {intro.eval_cpp('sizeof(double)', 'size_t')}\")\n", + "print(f\"_brian_mod(7, 3) = {intro.eval_cpp('_brian_mod(7, 3)', 'int32_t')}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "17f5f212", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\n \\n // scalar code (runs once, outside the loop)\\n const size_t _vectorisation_idx = -1;\\n \\n const double dt = _ptr_array_defaultclock_dt[0];\\n const double t = _ptr_array_defaultclock_t[0];\\n const int64_t _lio_1 = _timestep(0.005, dt);\\n const double _lio_2 = 1.0f*(- dt)/tau;\\n const double _lio_3 = exp(_lio_2);\\n\\n\\n const int _N = N;\\n\\n // vector code (runs per neuron)\\n for (int _idx = 0; _idx < _N; _idx++) {\\n const size_t _vectorisation_idx = _idx;\\n \\n const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\\n char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\\n double v = _ptr_array_neurongroup_v[_idx];\\n const double v0 = _ptr_array_neurongroup_v0[_idx];\\n not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\\n double _v;\\n if(!not_refractory)\\n _v = (v + v0) - v0;\\n else \\n _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\\n if(not_refractory)\\n v = _v;\\n _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\\n 
_ptr_array_neurongroup_v[_idx] = v;\\n\\n }\\n'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# --- Function body replacement ---\n", + "body = intro.get_body(\"*stateupdater*\", \"run\")\n", + "body" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "e5c1b14a", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO introspector: compiling _brian_cppyy_run_neurongroup_stateupdater_codeobject_v1 (replacing _brian_cppyy_run_neurongroup_stateupdater_codeobject) [brian2.codegen.runtime.cppyy_rt.introspector]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "============================================================\n", + "REPLACING WITH LINEAR APPROXIMATION\n", + "============================================================\n" + ] + }, + { + "data": { + "text/plain": [ + "'_brian_cppyy_run_neurongroup_stateupdater_codeobject_v1'" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "v_before = np.array(group.v[:])\n", + "new_body = body.replace(\"exp(_lio_2)\", \"(1.0 + _lio_2)\")\n", + "print(\"\\n\" + \"=\" * 60)\n", + "print(\"REPLACING WITH LINEAR APPROXIMATION\")\n", + "print(\"=\" * 60)\n", + "versioned_name = intro.replace_body(\"*stateupdater*\", \"run\", new_body)\n", + "versioned_name" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b27307d3", + "metadata": {}, + "outputs": [], + "source": [ + "run(100 * ms)\n", + "v_after_mod = np.array(group.v[:])" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "83dfdade", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO introspector: restored original neurongroup_stateupdater_codeobject.run [brian2.codegen.runtime.cppyy_rt.introspector]\n" + ] + } + ], + "source": [ + "# --- Restore ---\n", + 
"intro.restore(\"*stateupdater*\", \"run\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "d71d2621", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO introspector: injected custom C++ code [brian2.codegen.runtime.cppyy_rt.introspector]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "leaky_relu(-5, 0.01) = -0.05\n", + "leaky_relu(3, 0.01) = 3.0\n" + ] + } + ], + "source": [ + "intro.inject_cpp(\"\"\"\n", + "inline double leaky_relu(double x, double alpha) {\n", + " return x > 0.0 ? x : alpha * x;\n", + "}\n", + "\"\"\")\n", + "print(f\"\\nleaky_relu(-5, 0.01) = {intro.eval_cpp('leaky_relu(-5.0, 0.01)')}\")\n", + "print(f\"leaky_relu(3, 0.01) = {intro.eval_cpp('leaky_relu(3.0, 0.01)')}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "ba35148e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
                               Compiled Code Objects                               \n",
+       "┏━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┓\n",
+       "    Code Object                                 Template      Blocks  # Vars \n",
+       "┡━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━┩\n",
+       " neurongroup_spike_resetter_codeobject       reset         run          5 \n",
+       " neurongroup_spike_resetter_codeobject_1     reset         run          5 \n",
+       " neurongroup_spike_thresholder_codeobject    threshold     run          8 \n",
+       " neurongroup_spike_thresholder_codeobject_1  threshold     run          8 \n",
+       " neurongroup_stateupdater_codeobject         stateupdate   run         11 \n",
+       " neurongroup_stateupdater_codeobject_1       stateupdate   run         11 \n",
+       " statemonitor_codeobject                     statemonitor  run          8 \n",
+       " statemonitor_codeobject_1                   statemonitor  run          8 \n",
+       "└───┴────────────────────────────────────────────┴──────────────┴────────┴────────┘\n",
+       "
\n" + ], + "text/plain": [ + "\u001b[3m \u001b[0m\u001b[1;3mCompiled Code Objects\u001b[0m\u001b[3m \u001b[0m\n", + "\u001b[2m┏━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┓\u001b[0m\n", + "\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mCode Object \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mTemplate \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mBlocks\u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m# Vars\u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\n", + "\u001b[2m┡━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━┩\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_resetter_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mreset \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 5 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_resetter_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mreset \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 5 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_thresholder_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mthreshold \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_thresholder_codeobject_1\u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mthreshold \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 
\u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_stateupdater_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstateupdate \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 11 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_stateupdater_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstateupdate \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 11 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mstatemonitor_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstatemonitor\u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", + "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mstatemonitor_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstatemonitor\u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", + "\u001b[2m└───┴────────────────────────────────────────────┴──────────────┴────────┴────────┘\u001b[0m\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "intro.print_objects()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "449ebe57", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'name': 'neurongroup_stateupdater_codeobject',\n", + " 'sources': {'run': '#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n// Per-codeobject support code\\n\\ninline int64_t _timestep(double t, double dt) {\\n return (int64_t)((t + 1e-3*dt)/dt);\\n}\\n\\n// Template-specific support code (e.g. 
synaptic queue access)\\n\\n#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n\\nextern \"C\" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\\n \\n // scalar code (runs once, outside the loop)\\n const size_t _vectorisation_idx = -1;\\n \\n const double dt = _ptr_array_defaultclock_dt[0];\\n const double t = _ptr_array_defaultclock_t[0];\\n const int64_t _lio_1 = _timestep(0.005, dt);\\n const double _lio_2 = 1.0f*(- dt)/tau;\\n const double _lio_3 = exp(_lio_2);\\n\\n\\n const int _N = N;\\n\\n // vector code (runs per neuron)\\n for (int _idx = 0; _idx < _N; _idx++) {\\n const size_t _vectorisation_idx = _idx;\\n \\n const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\\n char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\\n double v = _ptr_array_neurongroup_v[_idx];\\n const double v0 = _ptr_array_neurongroup_v0[_idx];\\n not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\\n double _v;\\n if(!not_refractory)\\n _v = (v + v0) - v0;\\n else \\n _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\\n if(not_refractory)\\n v = _v;\\n _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\\n _ptr_array_neurongroup_v[_idx] = v;\\n\\n }\\n}'},\n", + " 'versions': {},\n", + " 'arrays': {'_ptr_array_defaultclock_dt': {'shape': (1,),\n", + " 'dtype': 'float64',\n", + " 'min': 0.0001,\n", + " 'max': 0.0001,\n", + " 'mean': 0.0001},\n", + " '_ptr_array_neurongroup_lastspike': {'shape': (100,),\n", + " 'dtype': 'float64',\n", + " 'min': -10000.0,\n", + " 'max': -10000.0,\n", + " 'mean': -10000.0},\n", + " '_ptr_array_neurongroup_not_refractory': {'shape': (100,),\n", + " 'dtype': 'bool',\n", + " 'min': 1.0,\n", + " 'max': 
1.0,\n", + " 'mean': 1.0},\n", + " '_ptr_array_defaultclock_t': {'shape': (1,),\n", + " 'dtype': 'float64',\n", + " 'min': 0.2,\n", + " 'max': 0.2,\n", + " 'mean': 0.2},\n", + " '_ptr_array_neurongroup_v': {'shape': (100,),\n", + " 'dtype': 'float64',\n", + " 'min': 0.0,\n", + " 'max': 0.009908746662274415,\n", + " 'mean': 0.005471325415868882},\n", + " '_ptr_array_neurongroup_v0': {'shape': (100,),\n", + " 'dtype': 'float64',\n", + " 'min': 0.0,\n", + " 'max': 0.02,\n", + " 'mean': 0.01}}}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# --- Snapshot ---\n", + "intro.snapshot(\"*stateupdater*\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fdf40611", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 5a2320b69ce91759bb4c9c3efc52ebdcf0d60c2f Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> Date: Sat, 14 Feb 2026 16:07:19 +0530 Subject: [PATCH 06/29] fix: remove unneeded code --- .../codegen/runtime/cppyy_rt/introspector.py | 20 ++----------------- 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/brian2/codegen/runtime/cppyy_rt/introspector.py b/brian2/codegen/runtime/cppyy_rt/introspector.py index 52b3de73c..12b2edd42 100644 --- a/brian2/codegen/runtime/cppyy_rt/introspector.py +++ b/brian2/codegen/runtime/cppyy_rt/introspector.py @@ -86,8 +86,6 @@ def get_instance(cls) -> CppyyIntrospector | None: def reset(cls) -> None: cls._instance = None - # === Registration === - def register(self, codeobj: Any, 
block: str, source: str) -> None: name: str = codeobj.name self._objects[name] = codeobj @@ -98,8 +96,6 @@ def register(self, codeobj: Any, block: str, source: str) -> None: self._registration_order.append(name) logger.diagnostic(f"introspector: registered {name}.{block}") - # === Name resolution === - def _resolve_name(self, pattern: str) -> str: """ Resolve a name or glob pattern to a single code object name. @@ -147,8 +143,6 @@ def _resolve_names(self, pattern: str) -> list[str]: return sorted(self._objects.keys()) return sorted(name for name in self._objects if fnmatch(name, pattern)) - # === Inspection === - def list_objects(self, pattern: str = "*") -> ObjectListDisplay: """List all registered code objects, their blocks, and template types.""" rows: list[dict[str, str]] = [] @@ -253,8 +247,6 @@ def cpp_globals(self) -> list[str]: cppyy = _get_cppyy() return sorted(x for x in dir(cppyy.gbl) if "_brian_" in x) - # === Modification === - def get_body(self, pattern: str, block: str = "run") -> str: """Extract just the function body, ready for editing.""" name: str = self._resolve_name(pattern) @@ -391,8 +383,6 @@ def snapshot(self, pattern: str) -> dict[str, Any]: "arrays": array_snapshot, } - # === Rich CLI display === - def print_objects(self, pattern: str = "*") -> None: """Pretty-print all code objects to the terminal.""" display = self.list_objects(pattern) @@ -433,8 +423,6 @@ def print_inspect(self, pattern: str, block: str = "run") -> None: else: print(repr(display)) - # === Internal === - def _get_func_name(self, name: str, block: str) -> str: safe: str = name.replace(".", "_").replace("*", "").replace("-", "_") return f"_brian_cppyy_{block}_{safe}" @@ -443,9 +431,7 @@ def _repr_html_(self) -> str: return self.list_objects()._repr_html_() -# ========================================================================= # Rich CLI renderers (only used when `rich` is installed) -# ========================================================================= def 
_rich_print_objects(display: ObjectListDisplay) -> None: @@ -545,9 +531,7 @@ def _rich_print_inspect(display: InspectDisplay) -> None: _rich_print_namespace(display.namespace) -# ========================================================================= # Value description helper -# ========================================================================= def _describe_value(val: Any) -> str: @@ -598,9 +582,9 @@ def _extract_function_parts(source: str, func_name: str) -> tuple[str, str, str] raise ValueError(f"Unmatched braces in function '{func_name}'") -# ========================================================================= +# ======================================================================== # Jupyter HTML display classes -# ========================================================================= +# ======================================================================== _DISPLAY_CSS: str = """ \n", - "

Compiled Code Objects

Code ObjectTemplateCompiled Blocks# Variables
neurongroup_spike_resetter_codeobjectresetrun5
neurongroup_spike_thresholder_codeobjectthresholdrun8
neurongroup_stateupdater_codeobjectstateupdaterun11
statemonitor_codeobjectstatemonitorrun8
" - ], - "text/plain": [ - "Compiled Code Objects:\n", - "\n", - " ● neurongroup_spike_resetter_codeobject template=reset blocks=[run] vars=5\n", - " ● neurongroup_spike_thresholder_codeobject template=threshold blocks=[run] vars=8\n", - " ● neurongroup_stateupdater_codeobject template=stateupdate blocks=[run] vars=11\n", - " ● statemonitor_codeobject template=statemonitor blocks=[run] vars=8" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# ---- 1. List all compiled code objects ----\n", - "intro.list_objects()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "518d84c6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "

Inspect: neurongroup_stateupdater_codeobject.run

C++ Source
#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
-       "#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
-       "// Per-codeobject support code\n",
-       "\n",
-       "inline int64_t _timestep(double t, double dt) {\n",
-       "    return (int64_t)((t + 1e-3*dt)/dt);\n",
-       "}\n",
-       "\n",
-       "// Template-specific support code (e.g. synaptic queue access)\n",
-       "\n",
-       "#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\n",
-       "\n",
-       "extern "C" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\n",
-       "    \n",
-       "    // scalar code (runs once, outside the loop)\n",
-       "    const size_t _vectorisation_idx = -1;\n",
-       "        \n",
-       "    const double dt = _ptr_array_defaultclock_dt[0];\n",
-       "    const double t = _ptr_array_defaultclock_t[0];\n",
-       "    const int64_t _lio_1 = _timestep(0.005, dt);\n",
-       "    const double _lio_2 = 1.0f*(- dt)/tau;\n",
-       "    const double _lio_3 = exp(_lio_2);\n",
-       "\n",
-       "\n",
-       "    const int _N = N;\n",
-       "\n",
-       "    // vector code (runs per neuron)\n",
-       "    for (int _idx = 0; _idx < _N; _idx++) {\n",
-       "        const size_t _vectorisation_idx = _idx;\n",
-       "                \n",
-       "        const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\n",
-       "        char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\n",
-       "        double v = _ptr_array_neurongroup_v[_idx];\n",
-       "        const double v0 = _ptr_array_neurongroup_v0[_idx];\n",
-       "        not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\n",
-       "        double _v;\n",
-       "        if(!not_refractory)\n",
-       "            _v = (v + v0) - v0;\n",
-       "        else \n",
-       "            _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\n",
-       "        if(not_refractory)\n",
-       "            v = _v;\n",
-       "        _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\n",
-       "        _ptr_array_neurongroup_v[_idx] = v;\n",
-       "\n",
-       "    }\n",
-       "}
Parameter Mapping\n", - "\n", - "

Parameter Mapping: neurongroup_stateupdater_codeobject.run

#C++ TypeParameter NameNamespace KeyCurrent Value
0int64_tNNint = 100
1double*_ptr_array_defaultclock_dt_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
2double*_ptr_array_neurongroup_lastspike_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
3int_numlastspike_numlastspikeint = 100
4int8_t*_ptr_array_neurongroup_not_refractory_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
5int_numnot_refractory_numnot_refractoryint = 100
6double*_ptr_array_defaultclock_t_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
7doubletautaufloat = 0.01
8double*_ptr_array_neurongroup_v_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
9int_numv_numvint = 100
10double*_ptr_array_neurongroup_v0_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
11int_numv0_numv0int = 100
Namespace (click to expand)\n", - "\n", - "

Namespace: neurongroup_stateupdater_codeobject

Arrays (data pointers) (6)
KeyValue
_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
Sizes (_num*) (6)
KeyValue
_numdtint = 1
_numlastspikeint = 100
_numnot_refractoryint = 100
_numtint = 1
_numvint = 100
_numv0int = 100
Constants (scalars) (3)
KeyValue
Nint = 100
dtfloat = 0.0001
taufloat = 0.01
Variable Objects (_var_*) (8)
KeyValue
_var_NConstant
_var_dtArrayVariable
_var_lastspikeArrayVariable
_var_not_refractoryArrayVariable
_var_tArrayVariable
_var_tauConstant
_var_vArrayVariable
_var_v0ArrayVariable
Other (2)
KeyValue
_ownerNeuronGroup
logical_notufunc
" - ], - "text/plain": [ - "============================================================\n", - "INSPECT: neurongroup_stateupdater_codeobject.run\n", - "============================================================\n", - "\n", - "--- neurongroup_stateupdater_codeobject.run ---\n", - "#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", - "#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", - "// Per-codeobject support code\n", - "\n", - "inline int64_t _timestep(double t, double dt) {\n", - " return (int64_t)((t + 1e-3*dt)/dt);\n", - "}\n", - "\n", - "// Template-specific support code (e.g. synaptic queue access)\n", - "\n", - "#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\n", - "\n", - "extern \"C\" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\n", - " \n", - " // scalar code (runs once, outside the loop)\n", - " const size_t _vectorisation_idx = -1;\n", - " \n", - " const double dt = _ptr_array_defaultclock_dt[0];\n", - " const double t = _ptr_array_defaultclock_t[0];\n", - " const int64_t _lio_1 = _timestep(0.005, dt);\n", - " const double _lio_2 = 1.0f*(- dt)/tau;\n", - " const double _lio_3 = exp(_lio_2);\n", - "\n", - "\n", - " const int _N = N;\n", - "\n", - " // vector code (runs per neuron)\n", - " for (int _idx = 0; _idx < _N; _idx++) {\n", - " const size_t _vectorisation_idx = _idx;\n", - " \n", - " const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\n", - " char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\n", - " double v = _ptr_array_neurongroup_v[_idx];\n", - " const double v0 = _ptr_array_neurongroup_v0[_idx];\n", - " not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\n", - " double _v;\n", - " 
if(!not_refractory)\n", - " _v = (v + v0) - v0;\n", - " else \n", - " _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\n", - " if(not_refractory)\n", - " v = _v;\n", - " _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\n", - " _ptr_array_neurongroup_v[_idx] = v;\n", - "\n", - " }\n", - "}\n", - "\n", - "Parameter Mapping: neurongroup_stateupdater_codeobject.run\n", - "\n", - " [ 0] int64_t N <- ns[N] = int = 100\n", - " [ 1] double* _ptr_array_defaultclock_dt <- ns[_ptr_array_defaultclock_dt] = ndarray((1,), float64) = [0.0001]\n", - " [ 2] double* _ptr_array_neurongroup_lastspike <- ns[_ptr_array_neurongroup_lastspike] = ndarray((100,), float64) range=[-1e+04, -1e+04]\n", - " [ 3] int _numlastspike <- ns[_numlastspike] = int = 100\n", - " [ 4] int8_t* _ptr_array_neurongroup_not_refractory <- ns[_ptr_array_neurongroup_not_refractory] = ndarray((100,), bool) range=[1, 1]\n", - " [ 5] int _numnot_refractory <- ns[_numnot_refractory] = int = 100\n", - " [ 6] double* _ptr_array_defaultclock_t <- ns[_ptr_array_defaultclock_t] = ndarray((1,), float64) = [0.1]\n", - " [ 7] double tau <- ns[tau] = float = 0.01\n", - " [ 8] double* _ptr_array_neurongroup_v <- ns[_ptr_array_neurongroup_v] = ndarray((100,), float64) range=[0, 0.009957]\n", - " [ 9] int _numv <- ns[_numv] = int = 100\n", - " [10] double* _ptr_array_neurongroup_v0 <- ns[_ptr_array_neurongroup_v0] = ndarray((100,), float64) range=[0, 0.02]\n", - " [11] int _numv0 <- ns[_numv0] = int = 100\n", - "\n", - "Namespace: neurongroup_stateupdater_codeobject\n", - "\n", - " [Arrays (data pointers)]\n", - " _ptr_array_defaultclock_dt ndarray((1,), float64) = [0.0001]\n", - " _ptr_array_defaultclock_t ndarray((1,), float64) = [0.1]\n", - " _ptr_array_neurongroup_lastspike ndarray((100,), float64) range=[-1e+04, -1e+04]\n", - " _ptr_array_neurongroup_not_refractory ndarray((100,), bool) range=[1, 1]\n", - " _ptr_array_neurongroup_v ndarray((100,), float64) range=[0, 0.009957]\n", - " _ptr_array_neurongroup_v0 
ndarray((100,), float64) range=[0, 0.02]\n", - "\n", - " [Sizes (_num*)]\n", - " _numdt int = 1\n", - " _numlastspike int = 100\n", - " _numnot_refractory int = 100\n", - " _numt int = 1\n", - " _numv int = 100\n", - " _numv0 int = 100\n", - "\n", - " [Constants (scalars)]\n", - " N int = 100\n", - " dt float = 0.0001\n", - " tau float = 0.01\n", - "\n", - " [Variable Objects (_var_*)]\n", - " _var_N Constant\n", - " _var_dt ArrayVariable\n", - " _var_lastspike ArrayVariable\n", - " _var_not_refractory ArrayVariable\n", - " _var_t ArrayVariable\n", - " _var_tau Constant\n", - " _var_v ArrayVariable\n", - " _var_v0 ArrayVariable\n", - "\n", - " [Other]\n", - " _owner NeuronGroup\n", - " logical_not ufunc" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# ---- 2. Inspect the state updater ----\n", - "# Using glob pattern — \"stateupdater*\" matches the full name\n", - "intro.inspect(\"*stateupdater*\")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "94454327", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "

Parameter Mapping: neurongroup_stateupdater_codeobject.run

#C++ TypeParameter NameNamespace KeyCurrent Value
0int64_tNNint = 100
1double*_ptr_array_defaultclock_dt_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
2double*_ptr_array_neurongroup_lastspike_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
3int_numlastspike_numlastspikeint = 100
4int8_t*_ptr_array_neurongroup_not_refractory_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
5int_numnot_refractory_numnot_refractoryint = 100
6double*_ptr_array_defaultclock_t_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
7doubletautaufloat = 0.01
8double*_ptr_array_neurongroup_v_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
9int_numv_numvint = 100
10double*_ptr_array_neurongroup_v0_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
11int_numv0_numv0int = 100
" - ], - "text/plain": [ - "Parameter Mapping: neurongroup_stateupdater_codeobject.run\n", - "\n", - " [ 0] int64_t N <- ns[N] = int = 100\n", - " [ 1] double* _ptr_array_defaultclock_dt <- ns[_ptr_array_defaultclock_dt] = ndarray((1,), float64) = [0.0001]\n", - " [ 2] double* _ptr_array_neurongroup_lastspike <- ns[_ptr_array_neurongroup_lastspike] = ndarray((100,), float64) range=[-1e+04, -1e+04]\n", - " [ 3] int _numlastspike <- ns[_numlastspike] = int = 100\n", - " [ 4] int8_t* _ptr_array_neurongroup_not_refractory <- ns[_ptr_array_neurongroup_not_refractory] = ndarray((100,), bool) range=[1, 1]\n", - " [ 5] int _numnot_refractory <- ns[_numnot_refractory] = int = 100\n", - " [ 6] double* _ptr_array_defaultclock_t <- ns[_ptr_array_defaultclock_t] = ndarray((1,), float64) = [0.1]\n", - " [ 7] double tau <- ns[tau] = float = 0.01\n", - " [ 8] double* _ptr_array_neurongroup_v <- ns[_ptr_array_neurongroup_v] = ndarray((100,), float64) range=[0, 0.009957]\n", - " [ 9] int _numv <- ns[_numv] = int = 100\n", - " [10] double* _ptr_array_neurongroup_v0 <- ns[_ptr_array_neurongroup_v0] = ndarray((100,), float64) range=[0, 0.02]\n", - " [11] int _numv0 <- ns[_numv0] = int = 100" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# ---- 3. View just the params ----\n", - "intro.params(\"*stateupdater*\")" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "e84b1e06", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "

Namespace: neurongroup_stateupdater_codeobject

Arrays (data pointers) (6)
KeyValue
_ptr_array_defaultclock_dtndarray((1,), float64) = [0.0001]
_ptr_array_defaultclock_tndarray((1,), float64) = [0.1]
_ptr_array_neurongroup_lastspikendarray((100,), float64) range=[-1e+04, -1e+04]
_ptr_array_neurongroup_not_refractoryndarray((100,), bool) range=[1, 1]
_ptr_array_neurongroup_vndarray((100,), float64) range=[0, 0.009957]
_ptr_array_neurongroup_v0ndarray((100,), float64) range=[0, 0.02]
Sizes (_num*) (6)
KeyValue
_numdtint = 1
_numlastspikeint = 100
_numnot_refractoryint = 100
_numtint = 1
_numvint = 100
_numv0int = 100
Constants (scalars) (3)
KeyValue
Nint = 100
dtfloat = 0.0001
taufloat = 0.01
Variable Objects (_var_*) (8)
KeyValue
_var_NConstant
_var_dtArrayVariable
_var_lastspikeArrayVariable
_var_not_refractoryArrayVariable
_var_tArrayVariable
_var_tauConstant
_var_vArrayVariable
_var_v0ArrayVariable
Other (2)
KeyValue
_ownerNeuronGroup
logical_notufunc
" - ], - "text/plain": [ - "Namespace: neurongroup_stateupdater_codeobject\n", - "\n", - " [Arrays (data pointers)]\n", - " _ptr_array_defaultclock_dt ndarray((1,), float64) = [0.0001]\n", - " _ptr_array_defaultclock_t ndarray((1,), float64) = [0.1]\n", - " _ptr_array_neurongroup_lastspike ndarray((100,), float64) range=[-1e+04, -1e+04]\n", - " _ptr_array_neurongroup_not_refractory ndarray((100,), bool) range=[1, 1]\n", - " _ptr_array_neurongroup_v ndarray((100,), float64) range=[0, 0.009957]\n", - " _ptr_array_neurongroup_v0 ndarray((100,), float64) range=[0, 0.02]\n", - "\n", - " [Sizes (_num*)]\n", - " _numdt int = 1\n", - " _numlastspike int = 100\n", - " _numnot_refractory int = 100\n", - " _numt int = 1\n", - " _numv int = 100\n", - " _numv0 int = 100\n", - "\n", - " [Constants (scalars)]\n", - " N int = 100\n", - " dt float = 0.0001\n", - " tau float = 0.01\n", - "\n", - " [Variable Objects (_var_*)]\n", - " _var_N Constant\n", - " _var_dt ArrayVariable\n", - " _var_lastspike ArrayVariable\n", - " _var_not_refractory ArrayVariable\n", - " _var_t ArrayVariable\n", - " _var_tau Constant\n", - " _var_v ArrayVariable\n", - " _var_v0 ArrayVariable\n", - "\n", - " [Other]\n", - " _owner NeuronGroup\n", - " logical_not ufunc" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# ---- 4. View the namespace ----\n", - "intro.namespace(\"*stateupdater*\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "674aac98", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['_brian_cppyy_rng',\n", - " '_brian_cppyy_run_neurongroup_spike_resetter_codeobject',\n", - " '_brian_cppyy_run_neurongroup_spike_thresholder_codeobject',\n", - " '_brian_cppyy_run_neurongroup_stateupdater_codeobject',\n", - " '_brian_cppyy_run_statemonitor_codeobject']" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# ---- 5. 
View C++ globals ----\n", - "intro.cpp_globals()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "cb14d40f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "============================================================\n", - "EVAL C++\n", - "============================================================\n", - "M_PI = 3.141592653589793\n", - "sizeof(double) = 8\n", - "_brian_mod(7, 3) = 1\n" - ] - } - ], - "source": [ - "# ---- 6. Evaluate a C++ expression ----\n", - "print(\"\\n\" + \"=\" * 60)\n", - "print(\"EVAL C++\")\n", - "print(\"=\" * 60)\n", - "print(f\"M_PI = {intro.eval_cpp('M_PI')}\")\n", - "print(f\"sizeof(double) = {intro.eval_cpp('sizeof(double)', 'size_t')}\")\n", - "print(f\"_brian_mod(7, 3) = {intro.eval_cpp('_brian_mod(7, 3)', 'int32_t')}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "17f5f212", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'\\n \\n // scalar code (runs once, outside the loop)\\n const size_t _vectorisation_idx = -1;\\n \\n const double dt = _ptr_array_defaultclock_dt[0];\\n const double t = _ptr_array_defaultclock_t[0];\\n const int64_t _lio_1 = _timestep(0.005, dt);\\n const double _lio_2 = 1.0f*(- dt)/tau;\\n const double _lio_3 = exp(_lio_2);\\n\\n\\n const int _N = N;\\n\\n // vector code (runs per neuron)\\n for (int _idx = 0; _idx < _N; _idx++) {\\n const size_t _vectorisation_idx = _idx;\\n \\n const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\\n char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\\n double v = _ptr_array_neurongroup_v[_idx];\\n const double v0 = _ptr_array_neurongroup_v0[_idx];\\n not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\\n double _v;\\n if(!not_refractory)\\n _v = (v + v0) - v0;\\n else \\n _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\\n if(not_refractory)\\n v = _v;\\n _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\\n 
_ptr_array_neurongroup_v[_idx] = v;\\n\\n }\\n'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# --- Function body replacement ---\n", - "body = intro.get_body(\"*stateupdater*\", \"run\")\n", - "body" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "e5c1b14a", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO introspector: compiling _brian_cppyy_run_neurongroup_stateupdater_codeobject_v1 (replacing _brian_cppyy_run_neurongroup_stateupdater_codeobject) [brian2.codegen.runtime.cppyy_rt.introspector]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "============================================================\n", - "REPLACING WITH LINEAR APPROXIMATION\n", - "============================================================\n" - ] - }, - { - "data": { - "text/plain": [ - "'_brian_cppyy_run_neurongroup_stateupdater_codeobject_v1'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "v_before = np.array(group.v[:])\n", - "new_body = body.replace(\"exp(_lio_2)\", \"(1.0 + _lio_2)\")\n", - "print(\"\\n\" + \"=\" * 60)\n", - "print(\"REPLACING WITH LINEAR APPROXIMATION\")\n", - "print(\"=\" * 60)\n", - "versioned_name = intro.replace_body(\"*stateupdater*\", \"run\", new_body)\n", - "versioned_name" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "b27307d3", - "metadata": {}, - "outputs": [], - "source": [ - "run(100 * ms)\n", - "v_after_mod = np.array(group.v[:])" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "83dfdade", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO introspector: restored original neurongroup_stateupdater_codeobject.run [brian2.codegen.runtime.cppyy_rt.introspector]\n" - ] - } - ], - "source": [ - "# --- Restore ---\n", - 
"intro.restore(\"*stateupdater*\", \"run\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "d71d2621", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO introspector: injected custom C++ code [brian2.codegen.runtime.cppyy_rt.introspector]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "leaky_relu(-5, 0.01) = -0.05\n", - "leaky_relu(3, 0.01) = 3.0\n" - ] - } - ], - "source": [ - "intro.inject_cpp(\"\"\"\n", - "inline double leaky_relu(double x, double alpha) {\n", - " return x > 0.0 ? x : alpha * x;\n", - "}\n", - "\"\"\")\n", - "print(f\"\\nleaky_relu(-5, 0.01) = {intro.eval_cpp('leaky_relu(-5.0, 0.01)')}\")\n", - "print(f\"leaky_relu(3, 0.01) = {intro.eval_cpp('leaky_relu(3.0, 0.01)')}\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "ba35148e", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
                               Compiled Code Objects                               \n",
-       "┏━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┓\n",
-       "    Code Object                                 Template      Blocks  # Vars \n",
-       "┡━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━┩\n",
-       " neurongroup_spike_resetter_codeobject       reset         run          5 \n",
-       " neurongroup_spike_resetter_codeobject_1     reset         run          5 \n",
-       " neurongroup_spike_thresholder_codeobject    threshold     run          8 \n",
-       " neurongroup_spike_thresholder_codeobject_1  threshold     run          8 \n",
-       " neurongroup_stateupdater_codeobject         stateupdate   run         11 \n",
-       " neurongroup_stateupdater_codeobject_1       stateupdate   run         11 \n",
-       " statemonitor_codeobject                     statemonitor  run          8 \n",
-       " statemonitor_codeobject_1                   statemonitor  run          8 \n",
-       "└───┴────────────────────────────────────────────┴──────────────┴────────┴────────┘\n",
-       "
\n" - ], - "text/plain": [ - "\u001b[3m \u001b[0m\u001b[1;3mCompiled Code Objects\u001b[0m\u001b[3m \u001b[0m\n", - "\u001b[2m┏━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━┓\u001b[0m\n", - "\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mCode Object \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mTemplate \u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36mBlocks\u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\u001b[1;36m \u001b[0m\u001b[1;36m# Vars\u001b[0m\u001b[1;36m \u001b[0m\u001b[2m┃\u001b[0m\n", - "\u001b[2m┡━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━┩\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_resetter_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mreset \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 5 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_resetter_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mreset \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 5 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_thresholder_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mthreshold \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_spike_thresholder_codeobject_1\u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mthreshold \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 
\u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_stateupdater_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstateupdate \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 11 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mneurongroup_stateupdater_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstateupdate \u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 11 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mstatemonitor_codeobject \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstatemonitor\u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", - "\u001b[2m│\u001b[0m ● \u001b[2m│\u001b[0m\u001b[32m \u001b[0m\u001b[32mstatemonitor_codeobject_1 \u001b[0m\u001b[32m \u001b[0m\u001b[2m│\u001b[0m\u001b[33m \u001b[0m\u001b[33mstatemonitor\u001b[0m\u001b[33m \u001b[0m\u001b[2m│\u001b[0m run \u001b[2m│\u001b[0m 8 \u001b[2m│\u001b[0m\n", - "\u001b[2m└───┴────────────────────────────────────────────┴──────────────┴────────┴────────┘\u001b[0m\n" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "intro.print_objects()" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "449ebe57", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'neurongroup_stateupdater_codeobject',\n", - " 'sources': {'run': '#ifndef _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n#define _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n// Per-codeobject support code\\n\\ninline int64_t _timestep(double t, double dt) {\\n return (int64_t)((t + 1e-3*dt)/dt);\\n}\\n\\n// Template-specific support code (e.g. 
synaptic queue access)\\n\\n#endif // _BRIAN_CPPYY_SC_41c7dc13377ec1af\\n\\nextern \"C\" void _brian_cppyy_run_neurongroup_stateupdater_codeobject(int64_t N, double* _ptr_array_defaultclock_dt, double* _ptr_array_neurongroup_lastspike, int _numlastspike, int8_t* _ptr_array_neurongroup_not_refractory, int _numnot_refractory, double* _ptr_array_defaultclock_t, double tau, double* _ptr_array_neurongroup_v, int _numv, double* _ptr_array_neurongroup_v0, int _numv0) {\\n \\n // scalar code (runs once, outside the loop)\\n const size_t _vectorisation_idx = -1;\\n \\n const double dt = _ptr_array_defaultclock_dt[0];\\n const double t = _ptr_array_defaultclock_t[0];\\n const int64_t _lio_1 = _timestep(0.005, dt);\\n const double _lio_2 = 1.0f*(- dt)/tau;\\n const double _lio_3 = exp(_lio_2);\\n\\n\\n const int _N = N;\\n\\n // vector code (runs per neuron)\\n for (int _idx = 0; _idx < _N; _idx++) {\\n const size_t _vectorisation_idx = _idx;\\n \\n const double lastspike = _ptr_array_neurongroup_lastspike[_idx];\\n char not_refractory = _ptr_array_neurongroup_not_refractory[_idx];\\n double v = _ptr_array_neurongroup_v[_idx];\\n const double v0 = _ptr_array_neurongroup_v0[_idx];\\n not_refractory = _timestep(t - lastspike, dt) >= _lio_1;\\n double _v;\\n if(!not_refractory)\\n _v = (v + v0) - v0;\\n else \\n _v = ((_lio_3 * v) + v0) - (_lio_3 * v0);\\n if(not_refractory)\\n v = _v;\\n _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;\\n _ptr_array_neurongroup_v[_idx] = v;\\n\\n }\\n}'},\n", - " 'versions': {},\n", - " 'arrays': {'_ptr_array_defaultclock_dt': {'shape': (1,),\n", - " 'dtype': 'float64',\n", - " 'min': 0.0001,\n", - " 'max': 0.0001,\n", - " 'mean': 0.0001},\n", - " '_ptr_array_neurongroup_lastspike': {'shape': (100,),\n", - " 'dtype': 'float64',\n", - " 'min': -10000.0,\n", - " 'max': -10000.0,\n", - " 'mean': -10000.0},\n", - " '_ptr_array_neurongroup_not_refractory': {'shape': (100,),\n", - " 'dtype': 'bool',\n", - " 'min': 1.0,\n", - " 'max': 
1.0,\n", - " 'mean': 1.0},\n", - " '_ptr_array_defaultclock_t': {'shape': (1,),\n", - " 'dtype': 'float64',\n", - " 'min': 0.2,\n", - " 'max': 0.2,\n", - " 'mean': 0.2},\n", - " '_ptr_array_neurongroup_v': {'shape': (100,),\n", - " 'dtype': 'float64',\n", - " 'min': 0.0,\n", - " 'max': 0.009908746662274415,\n", - " 'mean': 0.005471325415868882},\n", - " '_ptr_array_neurongroup_v0': {'shape': (100,),\n", - " 'dtype': 'float64',\n", - " 'min': 0.0,\n", - " 'max': 0.02,\n", - " 'mean': 0.01}}}" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# --- Snapshot ---\n", - "intro.snapshot(\"*stateupdater*\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fdf40611", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.13.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/test-cppyy-audit.py b/test-cppyy-audit.py deleted file mode 100644 index 196c8d362..000000000 --- a/test-cppyy-audit.py +++ /dev/null @@ -1,364 +0,0 @@ -""" -Comprehensive test suite for the cppyy JIT backend. - -Each test runs in a separate subprocess because Cling (the JIT compiler) -accumulates state that can conflict across start_scope() calls within a -single process. Subprocess isolation gives each test a clean Cling session. - -16 tests covering: basic neurons, all 3 monitors, RNG seeding, 4 synapse -connection methods, STDP, summed variables, multisynaptic index, multi-run, -store/restore, refractoriness, delayed synapses. 
-""" -import subprocess -import sys -import textwrap -import time - -_PREAMBLE = textwrap.dedent("""\ - import numpy as np - from brian2 import * - prefs.codegen.target = "cppyy" -""") - -TESTS = {} - - -def register(name): - def decorator(func): - # Extract function body source from the docstring - TESTS[name] = func.__doc__ - return func - return decorator - - -@register("Basic LIF neuron") -def _(): - """ - G = NeuronGroup(10, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - run(50 * ms) - assert np.any(G.v[:] > 0), "Neurons should have nonzero v" - """ - - -@register("SpikeMonitor") -def _(): - """ - G = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - mon = SpikeMonitor(G) - run(100 * ms) - assert mon.num_spikes > 0, "Should have recorded spikes" - assert len(mon.t) == len(mon.i), "t and i arrays must match" - assert np.all(mon.i[:] < 5), "Spike indices must be in range" - """ - - -@register("StateMonitor") -def _(): - """ - G = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - mon = StateMonitor(G, 'v', record=[0, 2, 4]) - run(20 * ms) - assert mon.t.shape[0] > 0, "Should have recorded timesteps" - assert mon.v.shape == (3, mon.t.shape[0]), "Shape mismatch" - """ - - -@register("RateMonitor") -def _(): - """ - G = NeuronGroup(50, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - mon = PopulationRateMonitor(G) - run(100 * ms) - assert len(mon.t) > 0, "Should have recorded rate" - assert len(mon.rate) == len(mon.t), "rate and t must match" - assert np.any(mon.rate[:] > 0 * Hz), "Should have nonzero rate" - """ - - -@register("RNG seeding reproducibility") -def _(): - """ - seed(12345) - G = NeuronGroup(10, 'dv/dt = -v/(10*ms) + xi*sqrt(2/(10*ms)) : 1', method='euler') - run(10 * ms) - result1 = np.array(G.v[:]) - - start_scope() - seed(12345) - G2 = NeuronGroup(10, 'dv/dt = 
-v/(10*ms) + xi*sqrt(2/(10*ms)) : 1', method='euler') - run(10 * ms) - result2 = np.array(G2.v[:]) - - np.testing.assert_array_equal(result1, result2) - """ - - -@register("Synapses - explicit i/j") -def _(): - """ - pre = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - post = NeuronGroup(3, 'dv/dt = -v/(10*ms) : 1', method='euler') - S = Synapses(pre, post, 'w : 1', on_pre='v_post += w') - S.connect(i=[0, 1, 2, 3, 4], j=[0, 1, 2, 0, 1]) - S.w = 0.5 - run(50 * ms) - assert len(S) == 5, f"Expected 5 synapses, got {len(S)}" - """ - - -@register("Synapses - one-to-one") -def _(): - """ - G = NeuronGroup(10, 'dv/dt = -v/(10*ms) + 0.3/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - S = Synapses(G, G, 'w : 1', on_pre='v_post += w') - S.connect(j='i', skip_if_invalid=True) - S.w = 0.1 - run(50 * ms) - assert len(S) == 10, f"Expected 10 synapses, got {len(S)}" - """ - - -@register("Synapses - all-to-all") -def _(): - """ - pre = NeuronGroup(4, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - post = NeuronGroup(3, 'dv/dt = -v/(10*ms) : 1', method='euler') - S = Synapses(pre, post, 'w : 1', on_pre='v_post += w') - S.connect() - S.w = 0.1 - run(50 * ms) - assert len(S) == 12, f"Expected 12 synapses, got {len(S)}" - """ - - -@register("Synapses - probabilistic") -def _(): - """ - seed(42) - pre = NeuronGroup(20, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - post = NeuronGroup(20, 'dv/dt = -v/(10*ms) : 1', method='euler') - S = Synapses(pre, post, 'w : 1', on_pre='v_post += w') - S.connect(p=0.5) - S.w = 0.05 - run(20 * ms) - assert 50 < len(S) < 350, f"Unexpected synapse count: {len(S)}" - """ - - -@register("STDP") -def _(): - """ - inp = NeuronGroup(10, 'dv/dt = -v/(10*ms) + 0.3/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - out = NeuronGroup(10, 'dv/dt = -v/(20*ms) : 1', - threshold='v > 1', reset='v = 0', 
method='euler') - S = Synapses(inp, out, - \'\'\'w : 1 - dApre/dt = -Apre / (20*ms) : 1 (event-driven) - dApost/dt = -Apost / (20*ms) : 1 (event-driven)\'\'\', - on_pre=\'\'\'v_post += w - Apre += 0.01 - w = clip(w + Apost, 0, 1)\'\'\', - on_post=\'\'\'Apost += -0.01 - w = clip(w + Apre, 0, 1)\'\'\') - S.connect(j='i') - S.w = 0.5 - run(100 * ms) - assert len(S) == 10 - w_vals = np.array(S.w[:]) - assert not np.allclose(w_vals, 0.5), "STDP should modify weights" - """ - - -@register("Summed variable") -def _(): - """ - G_pre = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - G_post = NeuronGroup(3, - \'\'\'dv/dt = (I_syn - v) / (10*ms) : 1 - I_syn : 1\'\'\', - threshold='v > 1', reset='v = 0', method='euler') - S = Synapses(G_pre, G_post, 'w : 1\\nI_syn_post = w : 1 (summed)') - S.connect() - S.w = '0.1 * rand()' - mon = StateMonitor(G_post, 'I_syn', record=[0]) - run(50 * ms) - assert len(S) == 15 - assert mon.t.shape[0] > 0 - """ - - -@register("Multisynaptic index") -def _(): - """ - pre = NeuronGroup(3, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - post = NeuronGroup(3, 'dv/dt = -v/(10*ms) : 1', method='euler') - S = Synapses(pre, post, 'w : 1', on_pre='v_post += w', multisynaptic_index='k') - S.connect(i=[0, 0, 1, 1, 1], j=[0, 0, 1, 1, 2]) - S.w = '0.1 * (k + 1)' - run(20 * ms) - assert len(S) == 5 - w_vals = np.array(S.w[:]) - assert len(w_vals) == 5 - """ - - -@register("Multi-run") -def _(): - """ - G = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - mon = SpikeMonitor(G) - run(50 * ms) - spikes_1 = mon.num_spikes - run(50 * ms) - spikes_2 = mon.num_spikes - assert spikes_2 >= spikes_1, "Second run should add more spikes" - assert spikes_2 > 0, "Should have spikes after 100ms total" - """ - - -@register("Store/restore") -def _(): - """ - G = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v 
> 1', reset='v = 0', method='euler') - run(20 * ms) - v_before = np.array(G.v[:]) - store() - run(30 * ms) - restore() - v_restored = np.array(G.v[:]) - np.testing.assert_array_almost_equal(v_before, v_restored) - """ - - -@register("Refractoriness") -def _(): - """ - # Use string-based refractory condition (like the HH model) - G = NeuronGroup(1, 'dv/dt = 0.25/ms : 1', threshold='v > 1', - reset='v = 0', refractory='v > 0.5', method='euler') - mon = SpikeMonitor(G) - run(100 * ms) - spike_times = np.array(mon.t[:]) - assert len(spike_times) >= 2, "Should have at least 2 spikes" - # Verify spikes are regular (neuron resets, climbs back, spikes again) - isis = np.diff(spike_times) - assert np.std(isis) / np.mean(isis) < 0.1, \ - f"ISIs should be regular, got std/mean = {np.std(isis)/np.mean(isis):.3f}" - """ - - -@register("Delayed synapses") -def _(): - """ - inp = NeuronGroup(1, 'dv/dt = 2.0/ms : 1', threshold='v > 1', - reset='v = 0', method='euler') - out = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1', method='euler') - S = Synapses(inp, out, 'w : 1', on_pre='v_post += w') - S.connect() - S.w = 0.5 - S.delay = 3 * ms - mon_out = StateMonitor(out, 'v', record=[0]) - mon_in = SpikeMonitor(inp) - run(30 * ms) - assert mon_in.num_spikes > 0, "Input should spike" - assert np.any(mon_out.v[0] > 0), "Output should receive delayed input" - """ - - -@register("SpikeGeneratorGroup") -def _(): - """ - # Test the spikegenerator template - indices = np.array([0, 1, 2, 1, 0]) - times = np.array([2, 4, 6, 8, 10]) * ms - G = SpikeGeneratorGroup(3, indices, times) - mon = SpikeMonitor(G) - run(15 * ms) - assert mon.num_spikes == 5, f"Expected 5 spikes, got {mon.num_spikes}" - recorded_i = np.array(mon.i[:]) - np.testing.assert_array_equal(np.sort(recorded_i), np.sort(indices)) - """ - - -@register("SpikeGeneratorGroup periodic") -def _(): - """ - # SpikeGeneratorGroup with period - indices = np.array([0, 1]) - times = np.array([1, 3]) * ms - G = SpikeGeneratorGroup(2, indices, times, 
period=5*ms) - mon = SpikeMonitor(G) - run(20 * ms) - # With 5ms period over 20ms, expect 4 cycles * 2 spikes = 8 - assert mon.num_spikes >= 6, f"Expected >= 6 periodic spikes, got {mon.num_spikes}" - """ - - -def run_test(index, name, code): - """Run a single test in a subprocess.""" - full_code = _PREAMBLE + textwrap.dedent(code) - t0 = time.perf_counter() - result = subprocess.run( - [sys.executable, "-c", full_code], - capture_output=True, - text=True, - timeout=120, - ) - elapsed = time.perf_counter() - t0 - - if result.returncode == 0: - print(f" [{index:2d}] {name}... PASS ({elapsed:.2f}s)") - return True - else: - print(f" [{index:2d}] {name}... FAIL ({elapsed:.2f}s)") - # Show last few lines of stderr (skip cppyy noise) - err_lines = [ - l for l in result.stderr.strip().split("\n") - if not l.startswith("[/") and "no debug info" not in l - ] - if err_lines: - for line in err_lines[-5:]: - print(f" {line}") - return False - - -if __name__ == "__main__": - print("=" * 60) - print("cppyy Backend Comprehensive Test Suite") - print("(each test runs in isolated subprocess)") - print("=" * 60) - - passed = 0 - failed = 0 - failed_names = [] - - t_total = time.perf_counter() - for i, (name, code) in enumerate(TESTS.items(), 1): - if run_test(i, name, code): - passed += 1 - else: - failed += 1 - failed_names.append(name) - t_total = time.perf_counter() - t_total - - print() - print("=" * 60) - print(f"Results: {passed} passed, {failed} failed ({t_total:.1f}s)") - if failed_names: - print(f"Failed: {', '.join(failed_names)}") - print("=" * 60) - - sys.exit(0 if failed == 0 else 1) diff --git a/test-cppyy-dynarray.py b/test-cppyy-dynarray.py deleted file mode 100644 index 076af6ff6..000000000 --- a/test-cppyy-dynarray.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Test cppyy DynamicArray implementation.""" -import numpy as np - -# Test the cppyy implementation directly -from brian2.memory.cppyy_dynamicarray import ( - CppyyDynamicArray1D, - CppyyDynamicArray2D, - 
DynamicArray, - DynamicArray1D, -) - -print("=" * 60) -print("TEST: CppyyDynamicArray1D") -print("=" * 60) - -for dtype in [np.float64, np.float32, np.int32, np.int64, np.bool_]: - arr = CppyyDynamicArray1D(10, dtype=dtype) - assert len(arr) == 10, f"len={len(arr)}, expected 10" - assert arr.shape == (10,), f"shape={arr.shape}" - assert arr.data.dtype == dtype or (dtype == np.bool_ and arr.data.dtype == np.int8), f"dtype mismatch: {arr.data.dtype}" - arr[0] = 42 if dtype != np.bool_ else 1 - assert arr[0] == (42 if dtype != np.bool_ else 1) - arr.resize(20) - assert len(arr) == 20 - arr.shrink(5) - assert len(arr) == 5 - capsule = arr.get_capsule() - assert capsule is not None - print(f" {dtype}: OK") - -print() -print("=" * 60) -print("TEST: CppyyDynamicArray2D") -print("=" * 60) - -for dtype in [np.float64, np.int32]: - arr2d = CppyyDynamicArray2D((5, 3), dtype=dtype) - assert arr2d.shape == (5, 3), f"shape={arr2d.shape}" - assert len(arr2d) == 5 - d = arr2d.data - assert d.shape == (5, 3), f"data shape={d.shape}" - arr2d[0, 0] = 99 - assert arr2d[0, 0] == 99 - arr2d.resize_along_first(10) - assert arr2d.shape == (10, 3) - capsule = arr2d.get_capsule() - assert capsule is not None - print(f" {dtype}: OK") - -print() -print("=" * 60) -print("TEST: Factory functions") -print("=" * 60) - -a1 = DynamicArray(5, dtype=np.float64) -assert isinstance(a1, CppyyDynamicArray1D) -a2 = DynamicArray((3, 4), dtype=np.int32) -assert isinstance(a2, CppyyDynamicArray2D) -a3 = DynamicArray1D(10, dtype=np.float32) -assert isinstance(a3, CppyyDynamicArray1D) -print(" Factory functions: OK") - -print() -print("=" * 60) -print("TEST: Capsule compatibility with cppyy C++ extraction") -print("=" * 60) - -import cppyy -# Ensure support code is loaded -from brian2.codegen.runtime.cppyy_rt.cppyy_rt import _ensure_support_code -_ensure_support_code() - -arr = CppyyDynamicArray1D(5, dtype=np.float64) -arr[:] = [1.0, 2.0, 3.0, 4.0, 5.0] -capsule = arr.get_capsule() - -# Extract the C++ pointer 
from capsule in C++ and verify data -cppyy.cppdef(""" -extern "C" double _test_capsule_extract(PyObject* cap) { - auto* dyn = _extract_dynamic_array_1d(cap); - return dyn->get_data_ptr()[2]; // should be 3.0 -} -""") -result = cppyy.gbl._test_capsule_extract(capsule) -assert result == 3.0, f"Expected 3.0, got {result}" -print(" Capsule extraction: OK (C++ read value 3.0)") - -# Test resize from C++ side -cppyy.cppdef(""" -extern "C" void _test_capsule_resize(PyObject* cap) { - auto* dyn = _extract_dynamic_array_1d(cap); - dyn->resize(10); - dyn->get_data_ptr()[9] = 99.0; -} -""") -cppyy.gbl._test_capsule_resize(capsule) -assert len(arr) == 10, f"len={len(arr)}, expected 10 after C++ resize" -assert arr[9] == 99.0, f"arr[9]={arr[9]}, expected 99.0" -print(" C++ resize via capsule: OK") - -print() -print("All DynamicArray tests passed!") diff --git a/test-cppyy-synapses.py b/test-cppyy-synapses.py deleted file mode 100644 index 3a9b41bad..000000000 --- a/test-cppyy-synapses.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Test cppyy synapse support: SpikeQueue + Synapses templates.""" -import time -import numpy as np -from brian2 import * - -prefs.codegen.target = 'cppyy' - -print("=" * 60) -print("TEST 1: Basic Synapses with fixed connectivity") -print("=" * 60) - -start_scope() - -# Simple pre-post network -inp = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - -out = NeuronGroup(3, 'dv/dt = -v/(10*ms) : 1', - threshold='v > 1', reset='v = 0', method='euler') - -S = Synapses(inp, out, 'w : 1', on_pre='v_post += w') -S.connect(i=[0, 1, 2, 3, 4], j=[0, 1, 2, 0, 1]) -S.w = 0.5 - -spike_inp = SpikeMonitor(inp) -spike_out = SpikeMonitor(out) -state_out = StateMonitor(out, 'v', record=True) - -t_start = time.perf_counter() -run(200*ms) -t_elapsed = time.perf_counter() - t_start - -print(f" Ran in {t_elapsed:.2f}s") -print(f" Input spikes: {spike_inp.num_spikes}") -print(f" Output spikes: {spike_out.num_spikes}") -print(f" 
Synapses created: {len(S)}") -print(f" StateMonitor: {state_out.t.shape[0]} timesteps") -assert len(S) == 5, f"Expected 5 synapses, got {len(S)}" -print(" PASS") - -print() -print("=" * 60) -print("TEST 2: STDP-like synapse with pre/post pathways") -print("=" * 60) - -start_scope() - -N = 10 -inp2 = NeuronGroup(N, 'dv/dt = -v/(10*ms) + 0.3/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - -out2 = NeuronGroup(N, 'dv/dt = -v/(20*ms) : 1', - threshold='v > 1', reset='v = 0', method='euler') - -S2 = Synapses(inp2, out2, - '''w : 1 - dApre/dt = -Apre / (20*ms) : 1 (event-driven) - dApost/dt = -Apost / (20*ms) : 1 (event-driven)''', - on_pre='''v_post += w - Apre += 0.01 - w = clip(w + Apost, 0, 1)''', - on_post='''Apost += -0.01 - w = clip(w + Apre, 0, 1)''') -S2.connect(j='i') # one-to-one -S2.w = 0.5 - -run(100*ms) - -print(f" Synapses: {len(S2)}") -print(f" Weight range: {float(np.min(S2.w)):.4f} - {float(np.max(S2.w)):.4f}") -assert len(S2) == N, f"Expected {N} synapses, got {len(S2)}" -print(" PASS") - -print() -print("=" * 60) -print("TEST 3: Summed variable (synaptic current)") -print("=" * 60) - -start_scope() - -eqs_neurons = ''' -dv/dt = (I_syn - v) / (10*ms) : 1 -I_syn : 1 -''' - -G_pre = NeuronGroup(5, 'dv/dt = -v/(10*ms) + 0.5/ms : 1', - threshold='v > 1', reset='v = 0', method='euler') - -G_post = NeuronGroup(3, eqs_neurons, threshold='v > 1', reset='v = 0', - method='euler') - -S3 = Synapses(G_pre, G_post, - '''w : 1 - I_syn_post = w : 1 (summed)''') -S3.connect() # all-to-all -S3.w = '0.1 * rand()' - -sm = StateMonitor(G_post, 'I_syn', record=[0]) - -run(50*ms) - -print(f" Synapses: {len(S3)}") -print(f" I_syn recorded: {sm.t.shape[0]} timesteps") -print(f" I_syn range: {float(np.min(sm.I_syn[0])):.4f} - {float(np.max(sm.I_syn[0])):.4f}") -print(" PASS") - -print() -print("All synapse tests complete!") From 934869fbd26627a7831e0f7ec6475fe7a74e38c6 Mon Sep 17 00:00:00 2001 From: Legend101Zz <96632943+Legend101Zz@users.noreply.github.com> 
Date: Wed, 6 May 2026 16:00:04 +0530 Subject: [PATCH 29/29] fix(cppyy): fix RNG reproducibility, seeding, state save/restore, and GSL skipping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three bugs caused CI failures for the cppyy runtime target: 1. `static std::mt19937 _brian_cppyy_rng` had internal linkage, so each new Cling translation unit (compiled per network.run() call) got a fresh default-seeded copy — all runs produced identical random values. Fix: remove `static` to give external linkage; one shared instance across all TUs. Also move `_dist_rand` to file scope (no static). 2. `seed()` checked `hasattr(cppyy.gbl, "_brian_cppyy_seed")` before the support code was compiled, so pre-run seed() calls were silent no-ops. Fix: call `_ensure_support_code()` eagerly inside `seed()`. 3. `get/set_random_state()` ignored C++ RNG state entirely, so `restore(restore_random_state=True)` could not reproduce identical runs. Fix: expose `_brian_cppyy_get/set_rng_state()` C++ functions (using std::ostringstream/istringstream) and integrate into get/set_random_state(). Additionally, `std::normal_distribution` has an internal cache that cannot be serialized. Replace with a custom Marsaglia polar method using explicit `_brian_randn_has_spare` / `_brian_randn_spare` file-scope variables that round-trip cleanly through the state string. GSL tests were also failing because `skip_if_not_implemented` only skipped for the numpy target, not cppyy. Fix: check `effective in ("numpy", "cppyy")`. 
--- brian2/codegen/runtime/cppyy_rt/cppyy_rt.py | 46 ++++++++++++++++++--- brian2/devices/device.py | 34 +++++++++++---- brian2/tests/test_GSL.py | 8 ++-- 3 files changed, 72 insertions(+), 16 deletions(-) diff --git a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py index c52bb5525..b6c25c3a6 100644 --- a/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py +++ b/brian2/codegen/runtime/cppyy_rt/cppyy_rt.py @@ -306,6 +306,8 @@ def _ensure_support_code() -> None: #include #include #include + #include + #include #ifndef M_PI #define M_PI 3.14159265358979323846 @@ -324,16 +326,39 @@ def _ensure_support_code() -> None: inline int32_t int_(T value) {{ return static_cast(value); }} #endif - // Shared RNG for rand/randn/poisson - static std::mt19937 _brian_cppyy_rng; + // Shared RNG for rand/randn/poisson — external linkage so all Cling TUs share one instance + std::mt19937 _brian_cppyy_rng; + std::uniform_real_distribution _dist_rand(0.0, 1.0); + + // Marsaglia polar method state — serializable unlike std::normal_distribution + bool _brian_randn_has_spare = false; + double _brian_randn_spare = 0.0; // Seeding function callable from Python via cppyy.gbl._brian_cppyy_seed() extern "C" void _brian_cppyy_seed(unsigned int seed) {{ _brian_cppyy_rng.seed(seed); + _brian_randn_has_spare = false; }} extern "C" void _brian_cppyy_seed_random() {{ std::random_device rd; _brian_cppyy_rng.seed(rd()); + _brian_randn_has_spare = false; + }} + + // RNG state serialization for get/set_random_state() + extern "C" const char* _brian_cppyy_get_rng_state() {{ + std::ostringstream oss; + oss << _brian_cppyy_rng << " " << (int)_brian_randn_has_spare + << " " << std::setprecision(17) << _brian_randn_spare; + static std::string _rng_state_str; + _rng_state_str = oss.str(); + return _rng_state_str.c_str(); + }} + extern "C" void _brian_cppyy_set_rng_state(const char* state_cstr) {{ + std::istringstream iss(state_cstr); + int has_spare_int; + iss >> _brian_cppyy_rng 
>> has_spare_int >> _brian_randn_spare; + _brian_randn_has_spare = (bool)has_spare_int; }} // ── Helper to extract a C++ pointer from a PyCapsule ── @@ -383,13 +408,24 @@ def _ensure_support_code() -> None: }} inline double _rand(const int _vectorisation_idx) {{ - static std::uniform_real_distribution _dist_rand(0.0, 1.0); return _dist_rand(_brian_cppyy_rng); }} inline double _randn(const int _vectorisation_idx) {{ - static std::normal_distribution _dist_randn(0.0, 1.0); - return _dist_randn(_brian_cppyy_rng); + if (_brian_randn_has_spare) {{ + _brian_randn_has_spare = false; + return _brian_randn_spare; + }} + double u, v, s; + do {{ + u = _dist_rand(_brian_cppyy_rng) * 2.0 - 1.0; + v = _dist_rand(_brian_cppyy_rng) * 2.0 - 1.0; + s = u * u + v * v; + }} while (s >= 1.0 || s == 0.0); + double factor = std::sqrt(-2.0 * std::log(s) / s); + _brian_randn_spare = v * factor; + _brian_randn_has_spare = true; + return u * factor; }} #endif // _BRIAN2_CPPYY_SUPPORT_CODE diff --git a/brian2/devices/device.py b/brian2/devices/device.py index 45b398e47..9209fd706 100644 --- a/brian2/devices/device.py +++ b/brian2/devices/device.py @@ -632,26 +632,38 @@ def seed(self, seed=None): self.rand_buffer_index[:] = 0 self.randn_buffer_index[:] = 0 - # Also seed the cppyy RNG if the backend is loaded + # Also seed the cppyy RNG if the backend is available. + # _ensure_support_code() compiles the C++ RNG eagerly so that seeding + # takes effect before any code object is compiled. 
try: + from brian2.codegen.runtime.cppyy_rt.cppyy_rt import _ensure_support_code + + _ensure_support_code() import cppyy - if hasattr(cppyy.gbl, "_brian_cppyy_seed"): - if seed is not None: - cppyy.gbl._brian_cppyy_seed(int(seed) % (2**32)) - else: - cppyy.gbl._brian_cppyy_seed_random() + if seed is not None: + cppyy.gbl._brian_cppyy_seed(int(seed) % (2**32)) + else: + cppyy.gbl._brian_cppyy_seed_random() except (ImportError, AttributeError): pass def get_random_state(self): - return { + state = { "numpy_state": np.random.get_state(), "rand_buffer_index": np.array(self.rand_buffer_index), "rand_buffer": np.array(self.rand_buffer), "randn_buffer_index": np.array(self.randn_buffer_index), "randn_buffer": np.array(self.randn_buffer), } + try: + import cppyy + + if hasattr(cppyy.gbl, "_brian_cppyy_get_rng_state"): + state["cppyy_rng_state"] = str(cppyy.gbl._brian_cppyy_get_rng_state()) + except (ImportError, AttributeError): + pass + return state def set_random_state(self, state): np.random.set_state(state["numpy_state"]) @@ -659,6 +671,14 @@ def set_random_state(self, state): self.rand_buffer[:] = state["rand_buffer"] self.randn_buffer_index[:] = state["randn_buffer_index"] self.randn_buffer[:] = state["randn_buffer"] + if "cppyy_rng_state" in state: + try: + import cppyy + + if hasattr(cppyy.gbl, "_brian_cppyy_set_rng_state"): + cppyy.gbl._brian_cppyy_set_rng_state(state["cppyy_rng_state"]) + except (ImportError, AttributeError): + pass class Dummy: diff --git a/brian2/tests/test_GSL.py b/brian2/tests/test_GSL.py index 2115cd4a6..33be6daac 100644 --- a/brian2/tests/test_GSL.py +++ b/brian2/tests/test_GSL.py @@ -17,10 +17,10 @@ def skip_if_not_implemented(func): @functools.wraps(func) def wrapped(): - if prefs.codegen.target == "numpy" or ( - prefs.codegen.target == "auto" and auto_target().class_name == "numpy" - ): - pytest.skip("GSL support for numpy has not been implemented yet") + target = prefs.codegen.target + effective = auto_target().class_name if target == 
"auto" else target + if effective in ("numpy", "cppyy"): + pytest.skip(f"GSL support for {effective!r} has not been implemented yet") else: return func()