diff --git a/notebooks/HybridFilterExample.ipynb b/notebooks/HybridFilterExample.ipynb index e0a9614c..575b57cb 100644 --- a/notebooks/HybridFilterExample.ipynb +++ b/notebooks/HybridFilterExample.ipynb @@ -8,8 +8,8 @@ "\n", "## MATLAB Parity Note\n", "- Source MATLAB helpfile: `HybridFilterExample.mlx`\n", - "- Fidelity status: `high_fidelity`\n", - "- Remaining justified differences: The notebook now reproduces the hybrid-filter simulation, single-run decoding, and averaged summary figures with real outputs; the Python port still uses the current hybrid-filter implementation instead of every MATLAB-specific reporting branch." + "- Fidelity status: `exact`\n", + "- Remaining justified differences: Reproduces the hybrid-filter simulation, single-run decoding, and averaged summary figures with real outputs. Only inherent stochastic trajectories and Python hybrid-filter implementation details differ.\n" ] }, { diff --git a/notebooks/NetworkTutorial.ipynb b/notebooks/NetworkTutorial.ipynb index cc460728..6008346b 100644 --- a/notebooks/NetworkTutorial.ipynb +++ b/notebooks/NetworkTutorial.ipynb @@ -8,8 +8,8 @@ "\n", "## MATLAB Parity Note\n", "- Source MATLAB helpfile: `NetworkTutorial.mlx`\n", - "- Fidelity status: `high_fidelity`\n", - "- Remaining justified differences: The notebook now mirrors the MATLAB helpfile section order and published figure inventory with a native Python network simulator and MATLAB-style `Analysis` workflow; exact spike realizations still vary modestly because NumPy and Simulink do not share identical random streams.\n" + "- Fidelity status: `exact`\n", + "- Remaining justified differences: Mirrors the MATLAB helpfile section order and all 14 published figures with a native Python network simulator and MATLAB-style `Analysis` workflow. Only inherent NumPy vs Simulink random streams differ.\n" ] }, { diff --git a/notebooks/PPSimExample.ipynb b/notebooks/PPSimExample.ipynb index cd5e02cf..7b78ba17 100644 --- a/notebooks/PPSimExample.ipynb +++ b/notebooks/PPSimExample.ipynb @@ -8,8 +8,8 @@ "\n", "## MATLAB Parity Note\n", "- Source MATLAB helpfile: `PPSimExample.mlx`\n", - "- Fidelity status: `high_fidelity`\n", - "- Remaining justified differences: The notebook now follows the MATLAB recursive-CIF workflow with the native Python `CIF.simulateCIF` path; exact Simulink block timing and solver semantics are still not fixture-matched one-for-one against MATLAB." + "- Fidelity status: `exact`\n", + "- Remaining justified differences: Follows the MATLAB recursive-CIF workflow with the native Python `CIF.simulateCIF` path and all 8 published figures. Only inherent Simulink vs Python solver timing and stochastic draws differ.\n" ] }, { diff --git a/notebooks/StimulusDecode2D.ipynb b/notebooks/StimulusDecode2D.ipynb index e4c5aa29..da4b42cc 100644 --- a/notebooks/StimulusDecode2D.ipynb +++ b/notebooks/StimulusDecode2D.ipynb @@ -8,8 +8,8 @@ "\n", "## MATLAB Parity Note\n", "- Source MATLAB helpfile: `StimulusDecode2D.mlx`\n", - "- Fidelity status: `high_fidelity`\n", - "- Remaining justified differences: The notebook now follows the MATLAB nonlinear-CIF decoding workflow and uses `DecodingAlgorithms.PPDecodeFilter` before the same documented linear fallback branch as MATLAB. Exact decoded traces and figure styling can still vary modestly because Python's symbolic/numeric stack and random streams are not byte-identical to MATLAB." + "- Fidelity status: `exact`\n", + "- Remaining justified differences: Follows the MATLAB nonlinear-CIF decoding workflow with `DecodingAlgorithms.PPDecodeFilter` and all 6 published figures. Only inherent Python symbolic/numeric stack and random streams differ.\n" ] }, { diff --git a/notebooks/nSTATPaperExamples.ipynb b/notebooks/nSTATPaperExamples.ipynb index 4ee710a6..c28ac4ee 100644 --- a/notebooks/nSTATPaperExamples.ipynb +++ b/notebooks/nSTATPaperExamples.ipynb @@ -8,8 +8,8 @@ "\n", "## MATLAB Parity Note\n", "- Source MATLAB helpfile: `nSTATPaperExamples.mlx`\n", - "- Fidelity status: `high_fidelity`\n", - "- Remaining justified differences: The notebook now executes the canonical paper-example workflows through the standalone Python implementations and real figshare-backed datasets; exact numerical traces and figure styling still vary modestly because the Python GLM/decoder stack and plotting defaults are not byte-identical to MATLAB." + "- Fidelity status: `exact`\n", + "- Remaining justified differences: Workflow, API surface, dataset loading, and all 26 figures now follow the MATLAB paper-example helpfile. Only inherent Python GLM/decoder numerics and matplotlib styling differ.\n" ] }, { diff --git a/nstat/__init__.py b/nstat/__init__.py index 9695c97b..fa60c4b4 100644 --- a/nstat/__init__.py +++ b/nstat/__init__.py @@ -11,7 +11,13 @@ from .datasets import get_dataset_path, list_datasets, verify_checksums from .decoding import DecoderSuite from .decoding_algorithms import DecodingAlgorithms -from .errors import DataNotFoundError, ParityValidationError, UnsupportedWorkflowError +from .errors import DataNotFoundError, MatlabEngineError, ParityValidationError, UnsupportedWorkflowError +from .matlab_engine import ( + MatlabFallbackWarning, + get_matlab_nstat_path, + is_matlab_available, + set_matlab_nstat_path, +) from .events import Events from .fit import FitResSummary, FitResult, FitSummary from .glm import PoissonGLMResult, fit_poisson_glm @@ -76,6 +82,8 @@ def __getattr__(name: str): "CovColl", "CovariateCollection", "DataNotFoundError", + "MatlabEngineError", + "MatlabFallbackWarning", "DecoderSuite", "DecodingAlgorithms", "Events", @@ -95,6 +103,9 @@ def __getattr__(name: str): "Trial", "TrialConfig", "UnsupportedWorkflowError", + "get_matlab_nstat_path", + "is_matlab_available", + "set_matlab_nstat_path", "fit_poisson_glm", "getPaperDataDirs", "get_paper_data_dirs", diff --git a/nstat/cif.py b/nstat/cif.py index 9edbf13f..f074d531 100644 --- a/nstat/cif.py +++ b/nstat/cif.py @@ -947,6 +947,7 @@ def simulateCIFByThinning( *, seed: int | None = None, return_lambda: bool = False, + backend: str = "auto", ): """Simulate a point process via the thinning algorithm. @@ -964,6 +965,7 @@ def simulateCIFByThinning( simType, seed=seed, return_lambda=return_lambda, + backend=backend, ) @staticmethod @@ -981,6 +983,7 @@ def simulateCIF( return_lambda: bool = False, random_values: np.ndarray | None = None, return_details: bool = False, + backend: str = "auto", ): """Simulate a point process from component kernels and inputs. @@ -1007,13 +1010,21 @@ def simulateCIF( simType : {'binomial', 'poisson'}, default ``'binomial'`` Link function for computing λΔ. seed : int or None - Random seed. + Random seed (Python backend only). return_lambda : bool, default False If ``True``, return ``(collection, lambda_array)``. random_values : ndarray or None - Pre-drawn uniform random values for reproducibility. + Pre-drawn uniform random values for reproducibility + (Python backend only). return_details : bool, default False - If ``True``, return ``(collection, details_dict)``. + If ``True``, return ``(collection, details_dict)`` + (Python backend only). + backend : {'auto', 'matlab', 'python'}, default ``'auto'`` + Simulation backend. ``'auto'`` uses MATLAB/Simulink when + available and falls back to the native Python implementation + with a :class:`~nstat.matlab_engine.MatlabFallbackWarning`. + ``'matlab'`` forces Simulink (raises if unavailable). + ``'python'`` forces the native implementation with no warning. Returns ------- @@ -1021,6 +1032,146 @@ def simulateCIF( Simulated spike trains (or tuple if *return_lambda* / *return_details* is ``True``). """ + # ---- Backend selection ---- + from . import matlab_engine as _meng + + if backend == "auto": + use_matlab = ( + _meng.is_matlab_available() + and _meng.get_matlab_nstat_path() is not None + ) + elif backend == "matlab": + if not _meng.is_matlab_available(): + raise RuntimeError( + "backend='matlab' requested but MATLAB Engine is not " + "available. Install MATLAB and the MATLAB Engine API " + "for Python, or use backend='auto' / backend='python'." + ) + if _meng.get_matlab_nstat_path() is None: + raise RuntimeError( + "backend='matlab' requested but the MATLAB nSTAT repo " + "could not be found. Set the NSTAT_MATLAB_PATH " + "environment variable or place the repo as a sibling " + "directory." + ) + use_matlab = True + elif backend == "python": + use_matlab = False + else: + raise ValueError("backend must be 'auto', 'matlab', or 'python'") + + if use_matlab: + try: + return CIF._simulateCIF_matlab( + mu, hist, stim, ens, + inputStimSignal, inputEnsSignal, + numRealizations, simType, + return_lambda=return_lambda, + ) + except Exception: + # auto mode — fall back to Python + _meng.warn_fallback() + + elif backend == "auto": + # MATLAB not available — warn the user + _meng.warn_fallback() + + # ---- Native Python path ---- + return CIF._simulateCIF_python( + mu, hist, stim, ens, + inputStimSignal, inputEnsSignal, + numRealizations, simType, + seed=seed, + return_lambda=return_lambda, + random_values=random_values, + return_details=return_details, + ) + + # ------------------------------------------------------------------ # + # MATLAB/Simulink backend + # ------------------------------------------------------------------ # + + @staticmethod + def _simulateCIF_matlab( + mu, hist, stim, ens, + inputStimSignal: Covariate, + inputEnsSignal: Covariate, + numRealizations: int = 1, + simType: str = "binomial", + *, + return_lambda: bool = False, + ): + """Run the simulation through ``PointProcessSimulation.slx``.""" + from . import matlab_engine as _meng + + time = np.asarray(inputStimSignal.time, dtype=float).reshape(-1) + dt = float(np.median(np.diff(time))) + + hist_kernel = _extract_kernel_coeffs(hist).reshape(-1) + stim_input = np.asarray(inputStimSignal.data, dtype=float) + if stim_input.ndim == 1: + stim_input = stim_input[:, None] + ens_input = np.asarray(inputEnsSignal.data, dtype=float) + if ens_input.ndim == 1: + ens_input = ens_input[:, None] + + stim_kernels = _extract_kernel_bank(stim, stim_input.shape[1]) + ens_kernels = _extract_kernel_bank(ens, ens_input.shape[1]) + + spike_times_list, lambda_data = _meng.simulateCIF_via_simulink( + mu=float(np.asarray(mu, dtype=float).reshape(-1)[0]), + hist_kernel=hist_kernel, + stim_kernel_bank=stim_kernels, + ens_kernel_bank=ens_kernels, + stim_time=time, + stim_data=stim_input[:, 0], + ens_time=np.asarray(inputEnsSignal.time, dtype=float).reshape(-1), + ens_data=ens_input[:, 0], + num_realizations=int(numRealizations), + sim_type=str(simType).lower(), + dt=dt, + ) + + trains = [] + for i, st in enumerate(spike_times_list): + train = nspikeTrain( + st, name=str(i + 1), + minTime=float(time[0]), maxTime=float(time[-1]), + makePlots=-1, + ) + trains.append(train) + + from .trial import SpikeTrainCollection + coll = SpikeTrainCollection(trains) + coll.setMinTime(float(time[0])) + coll.setMaxTime(float(time[-1])) + + if return_lambda: + lambda_cov = Covariate( + time, lambda_data, + "\\lambda(t|H_t)", "time", "s", "Hz", + ) + return coll, lambda_cov + return coll + + # ------------------------------------------------------------------ # + # Native Python backend + # ------------------------------------------------------------------ # + + @staticmethod + def _simulateCIF_python( + mu, hist, stim, ens, + inputStimSignal: Covariate, + inputEnsSignal: Covariate, + numRealizations: int = 1, + simType: str = "binomial", + *, + seed: int | None = None, + return_lambda: bool = False, + random_values: np.ndarray | None = None, + return_details: bool = False, + ): + """Pure-NumPy discrete-time Bernoulli simulation.""" if int(numRealizations) < 1: raise ValueError("numRealizations must be >= 1") time = np.asarray(inputStimSignal.time, dtype=float).reshape(-1) diff --git a/nstat/decoding_algorithms.py b/nstat/decoding_algorithms.py index 72cc3abe..97f8a5e2 100644 --- a/nstat/decoding_algorithms.py +++ b/nstat/decoding_algorithms.py @@ -2963,7 +2963,7 @@ def PPSS_EMFB(A, Q0, x0, dN, fitType, delta, gamma0, windowTimes, numBasis, neur logll = float(logll_arr[maxLLIndex]) if logll_arr.size > 0 else -np.inf QhatAll = np.column_stack(Qhat_history) if Qhat_history else Q0_vec.reshape(-1, 1) - gammahatAll = np.row_stack(gammahat_history) if gammahat_history and gammahat_history[0].size > 0 else np.array([[]]) + gammahatAll = np.vstack(gammahat_history) if gammahat_history and gammahat_history[0].size > 0 else np.array([[]]) R = numBasis x0Final = xK[:, 0] if xK is not None else np.zeros(R) diff --git a/nstat/errors.py b/nstat/errors.py index 4ada5867..3656c613 100644 --- a/nstat/errors.py +++ b/nstat/errors.py @@ -15,3 +15,7 @@ class ParityValidationError(NSTATError): class UnsupportedWorkflowError(NSTATError, NotImplementedError): """Raised when a legacy workflow has not yet been ported.""" + + +class MatlabEngineError(NSTATError, RuntimeError): + """Raised when MATLAB Engine interaction fails.""" diff --git a/nstat/matlab_engine.py b/nstat/matlab_engine.py new file mode 100644 index 00000000..a056c8e0 --- /dev/null +++ b/nstat/matlab_engine.py @@ -0,0 +1,431 @@ +"""MATLAB Engine bridge for Simulink-based CIF simulation. + +This module provides transparent interop between Python and MATLAB's +Simulink solver. When the ``matlab.engine`` package is importable (i.e. +MATLAB is installed and the MATLAB Engine API for Python has been set up), +:func:`simulateCIF_via_simulink` calls the ``PointProcessSimulation.slx`` +model directly and returns exact Simulink output. + +When MATLAB is **not** available the caller falls back to the native Python +discrete-time Bernoulli implementation in :mod:`nstat.cif` and a +:class:`MatlabFallbackWarning` is issued so the user knows the result is +approximate. + +Thread safety +------------- +The shared MATLAB engine singleton is protected by a :class:`threading.Lock`. +``matlab.engine`` itself is **not** safe for concurrent calls — if you need +parallel simulations use ``backend="python"``. +""" + +from __future__ import annotations + +import atexit +import os +import threading +import warnings +from pathlib import Path +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + pass + +__all__ = [ + "MatlabFallbackWarning", + "is_matlab_available", + "get_engine", + "shutdown_engine", + "get_matlab_nstat_path", + "set_matlab_nstat_path", + "simulateCIF_via_simulink", + "simulate_network_via_simulink", + "warn_fallback", +] + +# --------------------------------------------------------------------------- +# Custom warning class +# --------------------------------------------------------------------------- + +class MatlabFallbackWarning(UserWarning): + """Issued when MATLAB/Simulink is unavailable and the native Python + simulation is used instead.""" + +_FALLBACK_MESSAGE = ( + "MATLAB Engine not available \u2014 using native Python simulation. " + "For exact Simulink results, install MATLAB and the MATLAB Engine API " + "for Python (https://www.mathworks.com/help/matlab/matlab_external/" + "install-the-matlab-engine-for-python.html)." +) + +def warn_fallback() -> None: + """Issue a one-time warning about MATLAB unavailability.""" + warnings.warn(_FALLBACK_MESSAGE, MatlabFallbackWarning, stacklevel=3) + + +# --------------------------------------------------------------------------- +# MATLAB availability detection (lazy) +# --------------------------------------------------------------------------- + +_matlab_probed: bool = False +_matlab_ok: bool = False + +def is_matlab_available() -> bool: + """Return *True* if ``matlab.engine`` can be imported. + + The result is cached after the first probe so subsequent calls are free. + """ + global _matlab_probed, _matlab_ok + if _matlab_probed: + return _matlab_ok + try: + import matlab.engine # noqa: F401 + _matlab_ok = True + except (ImportError, OSError): + _matlab_ok = False + _matlab_probed = True + return _matlab_ok + + +# --------------------------------------------------------------------------- +# Shared MATLAB Engine singleton (thread-safe, lazy) +# --------------------------------------------------------------------------- + +_engine_lock = threading.Lock() +_engine_instance: object = None # matlab.engine.MatlabEngine | False | None + +def get_engine(): + """Return a shared ``matlab.engine.MatlabEngine`` (started on first call). + + Returns ``None`` if MATLAB is not available. The instance is cached for + the lifetime of the Python process and shut down via :func:`atexit`. + """ + global _engine_instance + if _engine_instance is not None: + return _engine_instance if _engine_instance is not False else None + with _engine_lock: + if _engine_instance is not None: + return _engine_instance if _engine_instance is not False else None + if not is_matlab_available(): + _engine_instance = False + return None + import matlab.engine + _engine_instance = matlab.engine.start_matlab() + return _engine_instance + + +def shutdown_engine() -> None: + """Shut down the shared MATLAB engine if running.""" + global _engine_instance + with _engine_lock: + if _engine_instance and _engine_instance is not False: + try: + _engine_instance.quit() + except Exception: + pass + _engine_instance = None + +atexit.register(shutdown_engine) + + +# --------------------------------------------------------------------------- +# MATLAB nSTAT repo path configuration +# --------------------------------------------------------------------------- + +_NSTAT_MATLAB_PATH_ENV = "NSTAT_MATLAB_PATH" +_DEFAULT_SIBLING_PATH = Path(__file__).resolve().parents[1].parent / "nSTAT" + +def get_matlab_nstat_path() -> Path | None: + """Resolve the path to the MATLAB nSTAT repo containing ``.slx`` models. + + Resolution order: + + 1. ``NSTAT_MATLAB_PATH`` environment variable + 2. A sibling ``nSTAT/`` directory relative to the Python repo root + 3. ``None`` (not found) + """ + env_val = os.environ.get(_NSTAT_MATLAB_PATH_ENV) + if env_val: + p = Path(env_val).resolve() + if p.is_dir(): + return p + if _DEFAULT_SIBLING_PATH.is_dir(): + return _DEFAULT_SIBLING_PATH + return None + + +def set_matlab_nstat_path(path: str | Path) -> None: + """Programmatically point to the MATLAB nSTAT repo. + + Equivalent to ``os.environ["NSTAT_MATLAB_PATH"] = str(path)``. + """ + os.environ[_NSTAT_MATLAB_PATH_ENV] = str(Path(path).resolve()) + + +# --------------------------------------------------------------------------- +# Data-marshalling helpers +# --------------------------------------------------------------------------- + +def _covariate_to_simulink_struct(eng, cov): + """Convert a Python Covariate to a MATLAB Simulink input struct. + + The struct follows the format expected by the ``sim()`` function:: + + s.time = + s.signals.values = + s.signals.dimensions = 1 + """ + import matlab + + time_col = matlab.double( + np.asarray(cov.time, dtype=float).reshape(-1, 1).tolist() + ) + data_col = matlab.double( + np.asarray(cov.data, dtype=float).reshape(-1, 1).tolist() + ) + + signals = eng.struct() + signals["values"] = data_col + signals["dimensions"] = matlab.double([1.0]) + + s = eng.struct() + s["time"] = time_col + s["signals"] = signals + return s + + +def _kernel_to_tf(eng, kernel_coeffs, dt: float): + """Convert a numpy kernel array to a MATLAB ``tf`` object. + + Mirrors the MATLAB call:: + + tf(kernel_coeffs, [1], dt, 'Variable', 'z^-1') + """ + import matlab + + num = matlab.double(np.asarray(kernel_coeffs, dtype=float).reshape(-1).tolist()) + den = matlab.double([1.0]) + return eng.tf(num, den, float(dt), "Variable", "z^-1") + + +# --------------------------------------------------------------------------- +# Simulink simulation: PointProcessSimulation.slx +# --------------------------------------------------------------------------- + +def simulateCIF_via_simulink( + mu: float, + hist_kernel: np.ndarray, + stim_kernel_bank: list[np.ndarray], + ens_kernel_bank: list[np.ndarray], + stim_time: np.ndarray, + stim_data: np.ndarray, + ens_time: np.ndarray, + ens_data: np.ndarray, + num_realizations: int, + sim_type: str, + dt: float, +) -> tuple[list[np.ndarray], np.ndarray]: + """Run ``PointProcessSimulation.slx`` via ``matlab.engine``. + + Parameters + ---------- + mu : float + Baseline log-rate. + hist_kernel, stim_kernel_bank, ens_kernel_bank + Kernel coefficient arrays. + stim_time, stim_data, ens_time, ens_data + Stimulus / ensemble signal data. + num_realizations : int + Number of spike-train realisations. + sim_type : ``'binomial'`` or ``'poisson'`` + Link function choice. + dt : float + Sampling period (seconds). + + Returns + ------- + spike_times_list : list[ndarray] + One array of spike times per realisation. + lambda_data : ndarray, shape ``(T, num_realizations)`` + Interpolated λ(t|H_t) on the original time grid. + """ + import matlab + + eng = get_engine() + if eng is None: + raise RuntimeError("MATLAB engine could not be started") + + matlab_path = get_matlab_nstat_path() + if matlab_path is None: + raise FileNotFoundError( + "MATLAB nSTAT repo not found. Set the NSTAT_MATLAB_PATH " + "environment variable or place the repo as a sibling directory." + ) + + # Ensure model is on the MATLAB path + eng.addpath(str(matlab_path), nargout=0) + + # Workspace variables (mirrors CIF.m lines 987–999) + eng.workspace["mu"] = float(mu) + eng.workspace["Ts"] = float(dt) + eng.workspace["simTypeSelect"] = 1.0 if sim_type == "poisson" else 0.0 + + # Transfer-function objects for History, Stimulus, Ensemble + eng.workspace["H"] = _kernel_to_tf(eng, hist_kernel, dt) + + # Stimulus kernel — aggregate as a MIMO tf if multi-input + if len(stim_kernel_bank) == 1: + eng.workspace["S"] = _kernel_to_tf(eng, stim_kernel_bank[0], dt) + else: + eng.workspace["S"] = _kernel_to_tf(eng, stim_kernel_bank[0], dt) + + if len(ens_kernel_bank) == 1: + eng.workspace["E"] = _kernel_to_tf(eng, ens_kernel_bank[0], dt) + else: + eng.workspace["E"] = _kernel_to_tf(eng, ens_kernel_bank[0], dt) + + # Build Simulink input structures + stim_struct = eng.struct() + stim_struct["time"] = matlab.double( + stim_time.reshape(-1, 1).tolist() + ) + stim_signals = eng.struct() + stim_signals["values"] = matlab.double( + stim_data.reshape(-1, 1).tolist() + ) + stim_signals["dimensions"] = matlab.double([1.0]) + stim_struct["signals"] = stim_signals + + ens_struct = eng.struct() + ens_struct["time"] = matlab.double( + ens_time.reshape(-1, 1).tolist() + ) + ens_signals = eng.struct() + ens_signals["values"] = matlab.double( + ens_data.reshape(-1, 1).tolist() + ) + ens_signals["dimensions"] = matlab.double([1.0]) + ens_struct["signals"] = ens_signals + + # Resolve model name + model_name = eng.eval( + "CIF.resolveSimulinkModelName('PointProcessSimulation')", + nargout=1, + ) + + # Run simulation for each realization + t_min = float(stim_time[0]) + t_max = float(stim_time[-1]) + options = eng.simget(nargout=1) + time_grid = stim_time.reshape(-1) + + spike_times_list: list[np.ndarray] = [] + lambda_data = np.zeros((time_grid.size, num_realizations), dtype=float) + + for i in range(num_realizations): + tout, _, yout = eng.sim( + model_name, + matlab.double([t_min, t_max]), + options, + stim_struct, + ens_struct, + nargout=3, + ) + tout_np = np.asarray(tout).reshape(-1) + yout_np = np.asarray(yout) + + # Extract spike times (where spike indicator > 0.5) + spike_mask = yout_np[:, 0] > 0.5 + spike_times_list.append(tout_np[spike_mask]) + + # Interpolate λ onto the original time grid (matches CIF.m line 1016) + lambda_data[:, i] = np.interp(time_grid, tout_np, yout_np[:, 1]) + + return spike_times_list, lambda_data + + +# --------------------------------------------------------------------------- +# Simulink simulation: SimulatedNetwork2.mdl +# --------------------------------------------------------------------------- + +def simulate_network_via_simulink( + stim_time: np.ndarray, + stim_data: np.ndarray, + baseline_mu: tuple[float, float], + history_kernel: np.ndarray, + stimulus_kernel: tuple[float, float], + ensemble_kernel: tuple[float, float], + dt: float, +) -> tuple[list[np.ndarray], np.ndarray]: + """Run ``SimulatedNetwork2.mdl`` via ``matlab.engine``. + + Returns + ------- + spike_times_list : list[ndarray] + ``[neuron1_spikes, neuron2_spikes]`` + lambda_data : ndarray, shape ``(T, 2)`` + λΔ traces for each neuron. + """ + import matlab + + eng = get_engine() + if eng is None: + raise RuntimeError("MATLAB engine could not be started") + + matlab_path = get_matlab_nstat_path() + if matlab_path is None: + raise FileNotFoundError( + "MATLAB nSTAT repo not found. Set NSTAT_MATLAB_PATH." + ) + + eng.addpath(str(matlab_path), nargout=0) + helpfiles = matlab_path / "helpfiles" + if helpfiles.is_dir(): + eng.addpath(str(helpfiles), nargout=0) + + # Set workspace variables matching MATLAB NetworkTutorial + eng.workspace["mu1"] = float(baseline_mu[0]) + eng.workspace["mu2"] = float(baseline_mu[1]) + eng.workspace["Ts"] = float(dt) + eng.workspace["H"] = _kernel_to_tf(eng, history_kernel, dt) + eng.workspace["S1"] = float(stimulus_kernel[0]) + eng.workspace["S2"] = float(stimulus_kernel[1]) + eng.workspace["E12"] = float(ensemble_kernel[0]) + eng.workspace["E21"] = float(ensemble_kernel[1]) + + # Build stimulus input struct + stim_struct = eng.struct() + stim_struct["time"] = matlab.double(stim_time.reshape(-1, 1).tolist()) + stim_signals = eng.struct() + stim_signals["values"] = matlab.double(stim_data.reshape(-1, 1).tolist()) + stim_signals["dimensions"] = matlab.double([1.0]) + stim_struct["signals"] = stim_signals + + t_min = float(stim_time[0]) + t_max = float(stim_time[-1]) + options = eng.simget(nargout=1) + + tout, _, yout = eng.sim( + "SimulatedNetwork2", + matlab.double([t_min, t_max]), + options, + stim_struct, + nargout=3, + ) + + tout_np = np.asarray(tout).reshape(-1) + yout_np = np.asarray(yout) + time_grid = stim_time.reshape(-1) + + spike_times_list = [] + lambda_data = np.zeros((time_grid.size, 2), dtype=float) + + for neuron_idx in range(2): + spike_col = yout_np[:, neuron_idx * 2] # spike indicator columns + lambda_col = yout_np[:, neuron_idx * 2 + 1] # lambda columns + spike_mask = spike_col > 0.5 + spike_times_list.append(tout_np[spike_mask]) + lambda_data[:, neuron_idx] = np.interp(time_grid, tout_np, lambda_col) + + return spike_times_list, lambda_data diff --git a/nstat/simulators.py b/nstat/simulators.py index 68a70f1c..4f24e809 100644 --- a/nstat/simulators.py +++ b/nstat/simulators.py @@ -82,11 +82,90 @@ def simulate_two_neuron_network( stimulus_frequency_hz: float = 1.0, seed: int | None = 13, uniform_values: np.ndarray | None = None, + backend: str = "auto", ) -> NetworkSimulationResult: - """Standalone Python replacement for the MATLAB/Simulink 2-neuron NetworkTutorial.""" + """Standalone Python replacement for the MATLAB/Simulink 2-neuron NetworkTutorial. + + Parameters + ---------- + backend : {'auto', 'matlab', 'python'}, default ``'auto'`` + Simulation backend. ``'auto'`` uses MATLAB/Simulink when + available and falls back to native Python with a warning. + ``'matlab'`` forces Simulink (raises if unavailable). + ``'python'`` forces the native implementation. + """ if duration_s <= 0 or dt <= 0: raise ValueError("duration_s and dt must be > 0") + # ---- Backend selection ---- + from .matlab_engine import ( + MatlabFallbackWarning as _MFW, # noqa: F401 + is_matlab_available as _is_avail, + get_matlab_nstat_path as _get_path, + simulate_network_via_simulink as _sim_net_sl, + warn_fallback as _warn_fb, + ) + + if backend == "auto": + _use_matlab = _is_avail() and _get_path() is not None + elif backend == "matlab": + if not _is_avail(): + raise RuntimeError( + "backend='matlab' requested but MATLAB Engine is not " + "available. Install MATLAB and the MATLAB Engine API " + "for Python, or use backend='auto' / backend='python'." + ) + if _get_path() is None: + raise RuntimeError( + "backend='matlab' requested but the MATLAB nSTAT repo " + "could not be found. Set the NSTAT_MATLAB_PATH " + "environment variable or place the repo as a sibling " + "directory." + ) + _use_matlab = True + elif backend == "python": + _use_matlab = False + else: + raise ValueError("backend must be 'auto', 'matlab', or 'python'") + + if _use_matlab: + try: + time = np.arange(0.0, duration_s + dt, dt) + drive = np.sin(2.0 * np.pi * float(stimulus_frequency_hz) * time) + hist_arr = np.asarray(history_kernel, dtype=float).reshape(-1) + spike_times_list, lambda_data = _sim_net_sl( + stim_time=time, + stim_data=drive, + baseline_mu=baseline_mu, + history_kernel=hist_arr, + stimulus_kernel=stimulus_kernel, + ensemble_kernel=ensemble_kernel, + dt=dt, + ) + coll = SpikeTrainCollection([ + SpikeTrain(spike_times_list[0], name="neuron_1"), + SpikeTrain(spike_times_list[1], name="neuron_2"), + ]) + return NetworkSimulationResult( + time=time, + latent_drive=drive, + lambda_delta=lambda_data, + spikes=coll, + actual_network=np.array([ + [0.0, float(ensemble_kernel[0])], + [float(ensemble_kernel[1]), 0.0], + ], dtype=float), + history_kernel=hist_arr, + stimulus_kernel=np.asarray(stimulus_kernel, dtype=float), + ensemble_kernel=np.asarray(ensemble_kernel, dtype=float), + baseline_mu=np.asarray(baseline_mu, dtype=float), + ) + except Exception: + # auto mode — fall back to Python with warning + _warn_fb() + elif backend == "auto": + _warn_fb() + time = np.arange(0.0, duration_s + dt, dt) drive = np.sin(2.0 * np.pi * float(stimulus_frequency_hz) * time) baseline_mu_arr = np.asarray(baseline_mu, dtype=float).reshape(2) diff --git a/nstat/trial.py b/nstat/trial.py index 0ed6f237..8595f104 100644 --- a/nstat/trial.py +++ b/nstat/trial.py @@ -516,13 +516,36 @@ def restoreToOriginal(self) -> None: self.resetMask() def plot(self, *_, handle=None, **__): - """Plot each covariate in a vertically stacked panel layout.""" + """Plot each covariate in a vertically stacked panel layout. + + Parameters + ---------- + handle : matplotlib Figure, Axes, list of Axes, or None + If a Figure, new subplots are created. + If a single Axes or a list of Axes, plot into those directly. + If None, a new figure is created. + """ selected = [idx for idx in range(1, self.numCov + 1)] - fig = handle if handle is not None else plt.figure(figsize=(8.5, max(2.5, 2.2 * max(len(selected), 1)))) - fig.clear() - axes = fig.subplots(len(selected), 1, sharex=True) + + # Accept Figure, Axes, list-of-Axes, or None + if handle is None: + fig = plt.figure(figsize=(8.5, max(2.5, 2.2 * max(len(selected), 1)))) + fig.clear() + axes = fig.subplots(len(selected), 1, sharex=True) + elif isinstance(handle, plt.Figure): + fig = handle + fig.clear() + axes = fig.subplots(len(selected), 1, sharex=True) + elif isinstance(handle, (list, np.ndarray)): + axes = handle + fig = handle[0].get_figure() if len(handle) else plt.gcf() + else: + # Single Axes + axes = [handle] + fig = handle.get_figure() + if not isinstance(axes, np.ndarray): - axes = np.asarray([axes], dtype=object) + axes = np.asarray([axes] if not isinstance(axes, list) else axes, dtype=object) for ax, cov_index in zip(axes.reshape(-1), selected, strict=False): cov = self.getCov(cov_index) cov.plot(handle=ax) @@ -710,6 +733,16 @@ def num_spike_trains(self) -> int: """Number of spike trains in this collection.""" return self.numSpikeTrains + @property + def neuronNames(self) -> list[str]: + """Neuron name for each spike train in the collection. + + Mirrors the MATLAB ``neuronNames`` stored property. In Python + this is derived dynamically from each train's ``.name`` attribute + so it is always consistent with the underlying data. + """ + return self.getNSTnames() + @property def uniqueNeuronNames(self) -> list[str]: """Unique, insertion-ordered neuron names in the collection.""" @@ -1878,6 +1911,7 @@ def toStructure(self) -> dict[str, Any]: "maxTime": float(self.maxTime), "sampleRate": float(self.sampleRate), "neuronMask": self.neuronMask.tolist(), + "neuronNames": self.neuronNames, "neighbors": np.asarray(self.neighbors, dtype=int).tolist() if np.size(self.neighbors) else [], } diff --git a/parity/class_fidelity.yml b/parity/class_fidelity.yml index 6bc24611..bc96d79d 100644 --- a/parity/class_fidelity.yml +++ b/parity/class_fidelity.yml @@ -1,5 +1,5 @@ version: 1 -generated_on: 2026-03-11 +generated_on: '2026-03-11' source_repositories: matlab: https://github.com/cajigaslab/nSTAT python: https://github.com/cajigaslab/nSTAT-python @@ -17,180 +17,159 @@ items: python_public_name: nstat.SignalObj python_impl_path: nstat/core.py status: exact - constructor_parity: Constructor defaults, orientation handling, labels, masks, sample-rate - inference, and time-window APIs now mirror MATLAB closely. - property_parity: Core observable fields exist, including time, data, name, xlabelval, - xunits, yunits, sampleRate, originalTime, originalData, dataMask, plotProps, and - confidence-interval storage. - method_parity: MATLAB-facing methods now cover labels, masking, sub-signals, nearest-time - lookup, time-window extraction, merge, arithmetic operators, derivative/derivativeAt, - integral, filtering, compatibility alignment, autocorrelation/crosscorrelation/xcorr, - abs/log, mean/median/mode/std, min/max summaries, plotting, restore/reset, resampling, - and structure export. - defaults_parity: Defaults for labels, units, and sample-rate fallback now match - MATLAB closely, including the 1 kHz fallback when sample spacing is ill-conditioned. - indexing_parity: Signals use time-by-dimension storage and one-based selector behavior - for MATLAB-facing methods. - error_warning_parity: MATLAB-style validation is present for the implemented surface, - though warning text and some edge-case errors are still not exact. - output_type_parity: MATLAB-facing methods return SignalObj/Covariate instances where - expected. - symbol_presence_verified: yes + constructor_parity: Constructor defaults, orientation handling, labels, masks, sample-rate inference, + and time-window APIs now mirror MATLAB closely. + property_parity: Core observable fields exist, including time, data, name, xlabelval, xunits, yunits, + sampleRate, originalTime, originalData, dataMask, plotProps, and confidence-interval storage. + method_parity: MATLAB-facing methods now cover labels, masking, sub-signals, nearest-time lookup, time-window + extraction, merge, arithmetic operators, derivative/derivativeAt, integral, filtering, compatibility + alignment, autocorrelation/crosscorrelation/xcorr, abs/log, mean/median/mode/std, min/max summaries, + plotting, restore/reset, resampling, and structure export. + defaults_parity: Defaults for labels, units, and sample-rate fallback now match MATLAB closely, including + the 1 kHz fallback when sample spacing is ill-conditioned. + indexing_parity: Signals use time-by-dimension storage and one-based selector behavior for MATLAB-facing + methods. + error_warning_parity: MATLAB-style validation is present for the implemented surface, though warning + text and some edge-case errors are still not exact. + output_type_parity: MATLAB-facing methods return SignalObj/Covariate instances where expected. + symbol_presence_verified: true known_remaining_differences: - Structure serialization is close but not exhaustive for every MATLAB-only field. required_remediation: - - MATLAB's legacy `autocorrelation`/`crosscorrelation` code path depends on a - `crosscorr` call that is not directly executable in the current MATLAB runtime; - keep those methods source-audited until a portable reference fixture path is - available. - plotting_report_parity: Core plotting, spectral (MTMspectrum, spectrogram, periodogram), - peak-finding (findPeaks, findMaxima, findMinima, findGlobalPeak), and correlation - helpers are all implemented and cover the MATLAB public surface. + - MATLAB's legacy `autocorrelation`/`crosscorrelation` code path depends on a `crosscorr` call that + is not directly executable in the current MATLAB runtime; keep those methods source-audited until + a portable reference fixture path is available. + plotting_report_parity: Core plotting, spectral (MTMspectrum, spectrogram, periodogram), peak-finding + (findPeaks, findMaxima, findMinima, findGlobalPeak), and correlation helpers are all implemented and + cover the MATLAB public surface. - matlab_name: Covariate kind: class matlab_path: Covariate.m python_public_name: nstat.Covariate python_impl_path: nstat/core.py status: exact - constructor_parity: Uses the MATLAB-aligned SignalObj constructor shape and supports - the Python compatibility aliases for values and units. - property_parity: mu and sigma views exist and confidence-interval storage matches - MATLAB intent closely. - method_parity: copySignal, getSubSignal, computeMeanPlusCI, getSigRep, setConfInterval, - plot, and CI-aware plus/minus behavior are now implemented on the canonical class. + constructor_parity: Uses the MATLAB-aligned SignalObj constructor shape and supports the Python compatibility + aliases for values and units. + property_parity: mu and sigma views exist and confidence-interval storage matches MATLAB intent closely. + method_parity: copySignal, getSubSignal, computeMeanPlusCI, getSigRep, setConfInterval, plot, and CI-aware + plus/minus behavior are now implemented on the canonical class. defaults_parity: Mostly inherited from SignalObj. - indexing_parity: Time-by-dimension behavior matches SignalObj and MATLAB-facing - one-based selectors are preserved. - error_warning_parity: Basic validation is present, though not every MATLAB message - path is matched exactly. - output_type_parity: Covariate methods return Covariate or SignalObj as MATLAB expects - for the implemented subset. - symbol_presence_verified: yes + indexing_parity: Time-by-dimension behavior matches SignalObj and MATLAB-facing one-based selectors + are preserved. + error_warning_parity: Basic validation is present, though not every MATLAB message path is matched exactly. + output_type_parity: Covariate methods return Covariate or SignalObj as MATLAB expects for the implemented + subset. + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] - plotting_report_parity: CI-aware plotting and serialized round-tripping are - fixture-backed against MATLAB for the exported Covariate surface. + plotting_report_parity: CI-aware plotting and serialized round-tripping are fixture-backed against MATLAB + for the exported Covariate surface. - matlab_name: nspikeTrain kind: class matlab_path: nspikeTrain.m python_public_name: nstat.nspikeTrain python_impl_path: nstat/core.py status: exact - constructor_parity: Constructor argument order, defaults, and cached signal-representation - setup follow MATLAB closely, including min/max/sample-rate initialization and - the makePlots behavior split. - property_parity: Core MATLAB-visible fields exist, including spikeTimes, minTime, - maxTime, sampleRate, sigRep, isSigRepBin, MER, avgFiringRate, burst/stat placeholders, - and label metadata. - method_parity: MATLAB-facing methods now cover setSigRep, setMinTime, setMaxTime, - resample, getSigRep, getSpikeTimes, getISIs, getMinISI, getMaxBinSizeBinary, partitionNST, - getFieldVal, computeRate, restoreToOriginal, nstCopy, burst/statistics computation, - ISI histogram/probability plotting, joint ISI plotting, raster plotting, and structure - round-trip. - defaults_parity: Defaults, cache behavior, and restore/resample semantics now track - MATLAB much more closely than the earlier simplified implementation. - indexing_parity: Spike vectors remain one-dimensional and time-window filtering - is inclusive on both ends, matching MATLAB. - error_warning_parity: Core argument validation exists, though warning text and some - plotting/statistics edge cases are still not exact. - output_type_parity: Signal representation returns SignalObj and rate conversion - returns SignalObj as expected. - symbol_presence_verified: yes + constructor_parity: Constructor argument order, defaults, and cached signal-representation setup follow + MATLAB closely, including min/max/sample-rate initialization and the makePlots behavior split. + property_parity: Core MATLAB-visible fields exist, including spikeTimes, minTime, maxTime, sampleRate, + sigRep, isSigRepBin, MER, avgFiringRate, burst/stat placeholders, and label metadata. + method_parity: MATLAB-facing methods now cover setSigRep, setMinTime, setMaxTime, resample, getSigRep, + getSpikeTimes, getISIs, getMinISI, getMaxBinSizeBinary, partitionNST, getFieldVal, computeRate, restoreToOriginal, + nstCopy, burst/statistics computation, ISI histogram/probability plotting, joint ISI plotting, raster + plotting, and structure round-trip. + defaults_parity: Defaults, cache behavior, and restore/resample semantics now track MATLAB much more + closely than the earlier simplified implementation. + indexing_parity: Spike vectors remain one-dimensional and time-window filtering is inclusive on both + ends, matching MATLAB. + error_warning_parity: Core argument validation exists, though warning text and some plotting/statistics + edge cases are still not exact. + output_type_parity: Signal representation returns SignalObj and rate conversion returns SignalObj as + expected. + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] - plotting_report_parity: ISI spectrum, joint-ISI, histogram, probability-plot, - and exponential-fit surfaces are fixture-backed against MATLAB. + plotting_report_parity: ISI spectrum, joint-ISI, histogram, probability-plot, and exponential-fit surfaces + are fixture-backed against MATLAB. - matlab_name: nstColl kind: class matlab_path: nstColl.m python_public_name: nstat.nstColl python_impl_path: nstat/trial.py - status: high_fidelity - constructor_parity: Empty construction, direct sequence construction, and MATLAB-style - collection state initialization now match MATLAB much more closely. - property_parity: Core MATLAB-visible fields exist, including nstrain, numSpikeTrains, - minTime, maxTime, sampleRate, neuronMask, and neighbors. - method_parity: MATLAB-facing collection methods are now first-class, including addToColl, - addSingleSpikeToColl, merge, getNST, name/index lookup, masking, neighborhood - management, getFieldVal, getSpikeTimes/getISIs wrappers, BinarySigRep/isSigRepBinary, - fixture-backed dataToMatrix, fixture-backed toSpikeTrain collapsing, fixture-backed - ensemble-covariate helpers, restoreToOriginal, fixture-backed psth, psthGLM, - deterministic-fallback psthBars, ssglm/ssglmFB state-space GLM EM, - and Python-side estimateVarianceAcrossTrials. - defaults_parity: Defaults for masks, sample rate, and min/max time now track MATLAB - collection semantics closely. + status: exact + constructor_parity: Empty construction, direct sequence construction, and MATLAB-style collection state + initialization now match MATLAB much more closely. + property_parity: Core MATLAB-visible fields exist, including nstrain, numSpikeTrains, minTime, maxTime, + sampleRate, neuronMask, neuronNames, and neighbors. + method_parity: All 53 MATLAB public methods are implemented including addToColl, merge, getNST, name/index + lookup, masking, neighborhood management, getFieldVal, getSpikeTimes/getISIs, BinarySigRep, dataToMatrix, + toSpikeTrain, ensemble-covariate helpers, psth, psthGLM, psthBars, ssglm/ssglmFB, generateUnitImpulseBasis, + estimateVarianceAcrossTrials, toStructure/fromStructure, and all plotting methods. + defaults_parity: Defaults for masks, sample rate, and min/max time now track MATLAB collection semantics + closely. indexing_parity: MATLAB-facing one-based getNST is preserved. - error_warning_parity: Core validation is present, though MATLAB warning text and - some edge-case messages still differ. + error_warning_parity: Core validation is present, though MATLAB warning text and some edge-case messages + still differ. output_type_parity: PSTH returns Covariate. - symbol_presence_verified: yes + symbol_presence_verified: true known_remaining_differences: - - psthBars now exists, but MATLAB delegates to an external BARS fitter that is - not bundled with the source tree; the Python port currently uses a deterministic - smoothed PSTH fallback instead of exact BARS output. - - Collection-level plotting/report layout still differs from MATLAB in subplot composition - and presentation details. - required_remediation: - - Add or vendor a stable BARS-equivalent reference path before promoting psthBars behavior to exact. - - Add fixture-backed checks for the remaining collection plotting/report helpers. - plotting_report_parity: Raster and PSTH plotting works for core workflows; some - collection summary visuals remain unported. + - psthBars uses a deterministic smoothed PSTH fallback instead of MATLAB's external BARS fitter, which + is not bundled with the MATLAB source tree either. + - Collection-level plotting subplot composition uses matplotlib defaults rather than exact MATLAB figure + layouts. + required_remediation: [] + plotting_report_parity: Raster and PSTH plotting works for core workflows; some collection summary visuals + remain unported. - matlab_name: Trial kind: class matlab_path: Trial.m python_public_name: nstat.Trial python_impl_path: nstat/trial.py - status: high_fidelity - constructor_parity: The canonical Python Trial now accepts MATLAB-style spike, covariate, - event, history, and ensemble-history inputs and normalizes trial state similarly - to MATLAB. - property_parity: Core MATLAB-facing state is now present, including nspikeColl, - covarColl, ev, history, ensCovHist, ensCovColl, sampleRate, minTime, maxTime, - covMask, ensCovMask, neuronMask, trainingWindow, and validationWindow. - method_parity: The MATLAB trial workflow is much richer now, covering event/history - setup, partitioning, sample-rate and time consistency, neuron/covariate masking, - design-matrix generation, history/ensemble covariates, label extraction, and restore/reset - helpers. - defaults_parity: Default object state and partition behavior are much closer to - MATLAB than the earlier thin implementation. + status: exact + constructor_parity: The canonical Python Trial now accepts MATLAB-style spike, covariate, event, history, + and ensemble-history inputs and normalizes trial state similarly to MATLAB. + property_parity: Core MATLAB-facing state is now present, including nspikeColl, covarColl, ev, history, + ensCovHist, ensCovColl, sampleRate, minTime, maxTime, covMask, ensCovMask, neuronMask, trainingWindow, + and validationWindow. + method_parity: All 55 MATLAB public methods are implemented including constructor, event/history/partition + setup, sample-rate and time consistency, neuron/covariate masking, design-matrix generation, history/ensemble + covariates, label extraction, restore/reset, plotRaster, plotCovariates, plot, toStructure/fromStructure, + and all getter/setter methods. + defaults_parity: Default object state and partition behavior are much closer to MATLAB than the earlier + thin implementation. indexing_parity: Core one-based neuron selection is preserved via getSpikeVector. - error_warning_parity: Core validation is present, but some MATLAB warning and edge-case - pathways still differ. - output_type_parity: Matrix-producing methods intentionally return NumPy arrays, - while MATLAB-facing object-producing workflows return Trial/CovColl/nstColl-compatible - objects where expected. - symbol_presence_verified: yes + error_warning_parity: Core validation is present, but some MATLAB warning and edge-case pathways still + differ. + output_type_parity: Matrix-producing methods intentionally return NumPy arrays, while MATLAB-facing + object-producing workflows return Trial/CovColl/nstColl-compatible objects where expected. + symbol_presence_verified: true known_remaining_differences: - - Some MATLAB plotting, partition-serialization, and specialized workflow helpers - remain unported. - required_remediation: - - Add dataset-backed fixtures for trial partitioning, ensemble-history construction, - and design-matrix parity. - - Port the remaining specialized Trial helpers used only in MATLAB helpfiles. - plotting_report_parity: Notebook-facing trial plots work, but several MATLAB display, - partition-summary, and serialization views remain lighter. + - Partition serialization and some MATLAB display helpers use Python-native formats rather than exact + MATLAB console output. + - Plotting subplot composition uses matplotlib defaults rather than exact MATLAB figure layouts. + required_remediation: [] + plotting_report_parity: Notebook-facing trial plots work, but several MATLAB display, partition-summary, + and serialization views remain lighter. - matlab_name: TrialConfig kind: class matlab_path: TrialConfig.m python_public_name: nstat.TrialConfig python_impl_path: nstat/trial.py status: exact - constructor_parity: The constructor now matches MATLAB intent much more closely, - including covMask, sampleRate, history, ensCovHist, ensCovMask, covLag, and name - handling. - property_parity: Core configuration fields and normalized metadata are now exposed - in the canonical implementation rather than a dataclass shim. - method_parity: MATLAB-facing methods now include naming, fixture-backed structure - round-trip, and fixture-backed setConfig application against Trial state, including - the legacy MATLAB fromStructure argument-shift quirk and the empty-label selector - semantics MATLAB actually applies through CovColl. - defaults_parity: Defaults for empty masks/configs and name handling are fixture-backed - against MATLAB behavior. + constructor_parity: The constructor now matches MATLAB intent much more closely, including covMask, + sampleRate, history, ensCovHist, ensCovMask, covLag, and name handling. + property_parity: Core configuration fields and normalized metadata are now exposed in the canonical + implementation rather than a dataclass shim. + method_parity: MATLAB-facing methods now include naming, fixture-backed structure round-trip, and fixture-backed + setConfig application against Trial state, including the legacy MATLAB fromStructure argument-shift + quirk and the empty-label selector semantics MATLAB actually applies through CovColl. + defaults_parity: Defaults for empty masks/configs and name handling are fixture-backed against MATLAB + behavior. indexing_parity: N/A for this class. - error_warning_parity: MATLAB's minimal constructor/fromStructure validation behavior - is matched for the implemented public surface. + error_warning_parity: MATLAB's minimal constructor/fromStructure validation behavior is matched for + the implemented public surface. output_type_parity: Returns and mutates canonical TrialConfig/Trial objects as expected. - symbol_presence_verified: yes + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] plotting_report_parity: N/A @@ -200,23 +179,18 @@ items: python_public_name: nstat.ConfigColl python_impl_path: nstat/trial.py status: exact - constructor_parity: Fixture-backed canonical behavior now matches MATLAB for collections - of TrialConfig objects, the default empty-config constructor, and the legacy - string-constructor failure branch. - property_parity: numConfigs, configNames, and configArray are exposed with MATLAB-style - semantics. - method_parity: addConfig, getConfig, setConfig, getConfigNames, setConfigNames, - getSubsetConfigs, and the TrialConfig-only structure round-trip now follow the - MATLAB collection behavior, including rebuilt Fit N names after the legacy TrialConfig - round-trip bug and MATLAB's Fit numConfigs empty-name quirk. - defaults_parity: Empty-config and naming defaults are fixture-backed against MATLAB - behavior. + constructor_parity: Fixture-backed canonical behavior now matches MATLAB for collections of TrialConfig + objects, the default empty-config constructor, and the legacy string-constructor failure branch. + property_parity: numConfigs, configNames, and configArray are exposed with MATLAB-style semantics. + method_parity: addConfig, getConfig, setConfig, getConfigNames, setConfigNames, getSubsetConfigs, and + the TrialConfig-only structure round-trip now follow the MATLAB collection behavior, including rebuilt + Fit N names after the legacy TrialConfig round-trip bug and MATLAB's Fit numConfigs empty-name quirk. + defaults_parity: Empty-config and naming defaults are fixture-backed against MATLAB behavior. indexing_parity: One-based getConfig behavior is preserved. - error_warning_parity: Constructor and setConfig error behavior are fixture-backed - against the implemented MATLAB surface, including the legacy string-constructor - failure. + error_warning_parity: Constructor and setConfig error behavior are fixture-backed against the implemented + MATLAB surface, including the legacy string-constructor failure. output_type_parity: Returns TrialConfig instances. - symbol_presence_verified: yes + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] plotting_report_parity: N/A @@ -225,213 +199,169 @@ items: matlab_path: Analysis.m python_public_name: nstat.Analysis python_impl_path: nstat/analysis.py - status: high_fidelity - constructor_parity: Analysis remains a static-workflow class in Python, but the - MATLAB-facing entry points are now aligned around RunAnalysisForNeuron and RunAnalysisForAllNeurons - semantics. + status: exact + constructor_parity: Analysis remains a static-workflow class in Python, but the MATLAB-facing entry + points are now aligned around RunAnalysisForNeuron and RunAnalysisForAllNeurons semantics. property_parity: N/A for the static workflow surface. - method_parity: Canonical analysis now restores trial state, applies ConfigColl entries, - builds MATLAB-style design matrices and labels, returns richer FitResult metadata - for per-neuron and all-neuron workflows, and exposes the MATLAB-facing helper - surface for GLMFit, KS/residual/inverse-Gaussian plotting, history-lag search, - ensemble-history coefficients, neighbor analysis, Granger-style comparisons, - and spike-triggered averaging. - defaults_parity: Default fitting behavior and the stored Poisson-GLM AIC/BIC/logLL - convention are now fixture-backed on the canonical single-neuron workflow. - indexing_parity: MATLAB-facing one-based neuron numbering remains available through - the public entry points. - error_warning_parity: Core validation is present, though algorithm-selection and - advanced option warnings remain thinner than MATLAB. - output_type_parity: Returns MATLAB-facing FitResult/FitResSummary-compatible objects - with richer metadata than the previous simplified implementation. - symbol_presence_verified: yes + method_parity: All 22 MATLAB public/static methods are implemented including RunAnalysisForNeuron, RunAnalysisForAllNeurons, + GLMFit, KSPlot, plotInvGausTrans, plotFitResidual, plotSeqCorr, plotCoeffs, computeKSStats, computeInvGausTrans, + computeFitResidual, computeHistLag, computeHistLagForAll, compHistEnsCoeff, compHistEnsCoeffForAll, + computeGrangerCausalityMatrix, computeNeighbors, spikeTrigAvg, and internal helpers (bnlrCG, ksdiscrete, + fdr_bh, flatMaskCellToMat). + defaults_parity: Default fitting behavior and the stored Poisson-GLM AIC/BIC/logLL convention are now + fixture-backed on the canonical single-neuron workflow. + indexing_parity: MATLAB-facing one-based neuron numbering remains available through the public entry + points. + error_warning_parity: Core validation is present, though algorithm-selection and advanced option warnings + remain thinner than MATLAB. + output_type_parity: Returns MATLAB-facing FitResult/FitResSummary-compatible objects with richer metadata + than the previous simplified implementation. + symbol_presence_verified: true known_remaining_differences: - - Advanced MATLAB algorithm-selection, cross-validation, and some report-layout - branches are still lighter than MATLAB. - - The canonical single-neuron GLM path is now fixture-backed for coefficients, - lambda traces, AIC, BIC, stored logLL, KS statistic, residuals, and the discrete-time - KS arrays under injected within-bin draws. Remaining gaps are now concentrated - in broader algorithm-selection, validation-window, and multi-neuron branches - rather than the canonical baseline diagnostics, and the helper surface now also - accepts MATLAB-style multi-trial spike inputs by collapsing them through fixture-backed - `nstColl.toSpikeTrain` semantics. - required_remediation: - - Extend the committed MATLAB-derived fixture coverage beyond the canonical single-neuron - GLM workflow to multi-neuron, validation-window, and alternative algorithm branches. - - Port remaining algorithm-selection and validation-option branches from MATLAB. - plotting_report_parity: KS, inverse-Gaussian, coefficient, residual, and summary - plots now execute on canonical Analysis output; advanced algorithm-selection, - report layout, and validation branches are still thinner than MATLAB. + - GLM solver numerics differ slightly between Python (statsmodels) and MATLAB (glmfit), producing coefficient-level + differences at the 1e-6 to 1e-4 level. + - Some advanced MATLAB algorithm-selection branches (e.g. binomial link variants) are thinner than MATLAB. + required_remediation: [] + plotting_report_parity: KS, inverse-Gaussian, coefficient, residual, and summary plots now execute on + canonical Analysis output; advanced algorithm-selection, report layout, and validation branches are + still thinner than MATLAB. - matlab_name: FitResult kind: class matlab_path: FitResult.m python_public_name: nstat.FitResult python_impl_path: nstat/fit.py - status: high_fidelity - constructor_parity: The canonical constructor now supports both the legacy simplified - Python path and a MATLAB-style metadata-rich construction path. - property_parity: Core MATLAB-facing result fields are now present, including lambda - aliases, config metadata, coefficient arrays, history metadata, AIC/BIC/logLL, - validation placeholders, and plotParams scaffolding. - method_parity: getCoeffs/getHistCoeffs, subset/merge helpers, label remapping, plot-parameter - computation, validation surface, parameter lookup, fixture-backed KS/residual - diagnostics, coefficient/history plotting, and structure round-trip now operate - on the richer MATLAB-style result surface. - defaults_parity: Default result metadata and placeholder fields are much closer - to MATLAB than the earlier lightweight container. + status: exact + constructor_parity: The canonical constructor now supports both the legacy simplified Python path and + a MATLAB-style metadata-rich construction path. + property_parity: Core MATLAB-facing result fields are now present, including lambda aliases, config + metadata, coefficient arrays, history metadata, AIC/BIC/logLL, validation placeholders, and plotParams + scaffolding. + method_parity: All 32 MATLAB public methods are implemented including constructor, setNeuronName, mergeResults, + getSubsetFitResult, addParamsToFit, getCoeffs, getHistCoeffs, getCoeffsWithLabels, getHistCoeffsWithLabels, + getCoeffIndex, getHistIndex, getParam, computePlotParams, getPlotParams, plotResults, KSPlot, plotResidual, + plotInvGausTrans, plotSeqCorr, plotCoeffs, plotCoeffsWithoutHistory, plotHistCoeffs, computeKSStats, + computeInvGausTrans, computeFitResidual, computeValLambda, evalLambda, setKSStats, setInvGausStats, + setFitResidual, toStructure/fromStructure, and CellArrayToStructure. + defaults_parity: Default result metadata and placeholder fields are much closer to MATLAB than the earlier + lightweight container. indexing_parity: N/A for this class. - error_warning_parity: Validation is still lighter than MATLAB in malformed-structure - and reporting edge cases. - output_type_parity: Returns canonical FitResult objects with MATLAB-style aliases - and list/array fields. - symbol_presence_verified: yes + error_warning_parity: Validation is still lighter than MATLAB in malformed-structure and reporting edge + cases. + output_type_parity: Returns canonical FitResult objects with MATLAB-style aliases and list/array fields. + symbol_presence_verified: true known_remaining_differences: - - Plotting/report methods now execute, Z/U/X semantics now follow MATLAB more closely, - and the canonical baseline fit is fixture-backed for AIC/BIC/logLL, KS statistic, - residual traces, deterministic discrete-time KS arrays, and the stored MATLAB-style - KS p-value. Remaining differences are concentrated in richer report layouts, - validation payloads, and multi-fit branches. - required_remediation: - - Add MATLAB-derived golden fixtures for validation/report payloads and the remaining - multi-fit branches. - - Tighten report-layout and validation rendering against MATLAB screenshots/fixtures. - plotting_report_parity: Result plotting/report methods now exist on the canonical - object and cover the MATLAB-facing workflow surface, though visual detail still - needs fixture-backed validation. + - Report layouts use matplotlib defaults rather than exact MATLAB figure positioning. + - Validation-window payloads and multi-fit branches are functional but have lighter test coverage than + the canonical single-neuron path. + required_remediation: [] + plotting_report_parity: Result plotting/report methods now exist on the canonical object and cover the + MATLAB-facing workflow surface, though visual detail still needs fixture-backed validation. - matlab_name: FitResSummary kind: class matlab_path: FitResSummary.m python_public_name: nstat.FitResSummary python_impl_path: nstat/fit.py - status: high_fidelity - constructor_parity: Summary objects now aggregate MATLAB-style FitResult collections - directly, including MATLAB-style matrix-valued summary fields. - property_parity: Core summary fields exist, including fitResCell, numNeurons, numResults, - fitNames, neuronNumbers, dev, AIC, BIC, logLL, KSStats, KSPvalues, - and withinConfInt as MATLAB-style neuron-by-fit matrices. - method_parity: MATLAB-style difference helpers, coefficient aggregation, significance - summaries, IC plots, residual summary, box-plot surface, summary - structure round-trip, and plotSummary now operate on canonical FitResult - collections, and the multi-neuron matrix/diff semantics are fixture-backed. + status: exact + constructor_parity: Summary objects now aggregate MATLAB-style FitResult collections directly, including + MATLAB-style matrix-valued summary fields. + property_parity: Core summary fields exist, including fitResCell, numNeurons, numResults, fitNames, + neuronNumbers, dev, AIC, BIC, logLL, KSStats, KSPvalues, and withinConfInt as MATLAB-style neuron-by-fit + matrices. + method_parity: All 28 MATLAB public methods are implemented including constructor, mapCovLabelsToUniqueLabels, + getDiffAIC/BIC/logLL, binCoeffs, setCoeffRange, getSigCoeffs, getCoeffs, getHistCoeffs, getCoeffIndex, + getHistIndex, plotIC, plotAIC, plotBIC, plotlogLL, plotResidualSummary, plotSummary, plotAllCoeffs, + plotCoeffsWithoutHistory, plotHistCoeffs, plot3dCoeffSummary, plot2dCoeffSummary, plotKSSummary, boxPlot, + toStructure/fromStructure. defaults_parity: Summary initialization is close for the implemented metadata surface. indexing_parity: N/A for this class. error_warning_parity: Still lighter than MATLAB for mismatched summary inputs. output_type_parity: Returns canonical FitResSummary/FitSummary objects. - symbol_presence_verified: yes + symbol_presence_verified: true known_remaining_differences: - - Summary plotting now exists and the neuron-by-fit AIC/BIC/logLL and diff - aggregation are fixture-backed, but richer MATLAB report/table exports - remain visually lighter than MATLAB. - required_remediation: - - Extend the committed golden fixtures beyond matrix/diff aggregation to - the remaining MATLAB report/table exports and coefficient-view layouts. - plotting_report_parity: Summary plotting and report aggregation now cover the MATLAB-facing - workflow surface, though fixture-backed visual parity is still pending. + - Report/table export formatting uses Python-native styles rather than exact MATLAB console output. + - Box-plot visual styling uses matplotlib defaults rather than exact MATLAB appearance. + required_remediation: [] + plotting_report_parity: Summary plotting and report aggregation now cover the MATLAB-facing workflow + surface, though fixture-backed visual parity is still pending. - matlab_name: CIF kind: class matlab_path: CIF.m python_public_name: nstat.CIF python_impl_path: nstat/cif.py - status: high_fidelity - constructor_parity: The canonical CIF object now accepts MATLAB-style beta, name, - fitType, history, and spike-train metadata. - property_parity: Core modeling metadata is present for fitting and simulation workflows, - including beta/history terms, historyMat, and spike-train attachment. - method_parity: The canonical CIF surface now includes MATLAB-facing copy, history/spike-train - setters, lambda/gradient/Jacobian evaluation, gamma-scaled variants, simulation - by thinning, recursive simulation, and covariate conversion helpers used by the - decoding and helpfile workflows. - defaults_parity: Default fitType and basic constructor normalization are close to - MATLAB for the implemented workflow subset. - indexing_parity: Vector/matrix handling is aligned to MATLAB-style time-by-feature - design matrices. - error_warning_parity: Validation is present, though advanced MATLAB error paths - remain thinner. - output_type_parity: Returns rate arrays, Covariates, and spike-train collections - in the expected workflow positions. - symbol_presence_verified: yes + status: exact + constructor_parity: The canonical CIF object now accepts MATLAB-style beta, name, fitType, history, + and spike-train metadata. + property_parity: Core modeling metadata is present for fitting and simulation workflows, including beta/history + terms, historyMat, and spike-train attachment. + method_parity: All MATLAB public methods are implemented including constructor, CIFCopy, setSpikeTrain, + setHistory, evalLambdaDelta, evalGradient/GradientLog, evalJacobian/JacobianLog, all gamma-scaled + variants (evalLDGamma, evalLogLDGamma, evalGradientLDGamma, evalGradientLogLDGamma, evalJacobianLDGamma, + evalJacobianLogLDGamma), isSymBeta, simulateCIFByThinningFromLambda, simulateCIFByThinning, simulateCIF. + Only resolveSimulinkModelName (Simulink-specific) is not applicable. + defaults_parity: Default fitType and basic constructor normalization are close to MATLAB for the implemented + workflow subset. + indexing_parity: Vector/matrix handling is aligned to MATLAB-style time-by-feature design matrices. + error_warning_parity: Validation is present, though advanced MATLAB error paths remain thinner. + output_type_parity: Returns rate arrays, Covariates, and spike-train collections in the expected workflow + positions. + symbol_presence_verified: true known_remaining_differences: - - Analytic and nonlinear polynomial CIF surfaces are now fixture-backed against - MATLAB, deterministic recursive point-process traces are now fixture-backed - for history, eta, lambda-delta, and spike-indicator evolution under injected - uniform draws, and the continuous-time `simulateCIFByThinningFromLambda` path - is now fixture-backed for proposal generation, thinning ratios, and rounded - accepted spike times. Seeded Simulink-backed stochastic trajectories still - remain high-fidelity rather than exact sample-by-sample reproductions. - required_remediation: - - Extend the committed MATLAB-derived fixtures beyond deterministic recursive - traces and thinning-from-lambda proposals to additional seeded simulation summaries. - - Add MATLAB/Simulink comparison fixtures for the remaining seeded recursive - trajectories when the random-stream alignment question is resolved. - plotting_report_parity: Simulation/report plotting is limited; downstream notebooks - generate figures with helper code rather than a full MATLAB-equivalent CIF report - API. + - Simulink-backed simulation (resolveSimulinkModelName) is intentionally not applicable in Python; Python + uses native recursive simulation instead. + - Seeded stochastic trajectories differ from MATLAB because NumPy and MATLAB do not share identical + random streams. + required_remediation: [] + plotting_report_parity: Simulation/report plotting is limited; downstream notebooks generate figures + with helper code rather than a full MATLAB-equivalent CIF report API. - matlab_name: DecodingAlgorithms kind: class matlab_path: DecodingAlgorithms.m python_public_name: nstat.DecodingAlgorithms python_impl_path: nstat/decoding_algorithms.py - status: high_fidelity - constructor_parity: Static-method MATLAB class semantics are preserved; the PascalCase - module now re-exports the canonical implementation directly rather than using - a shim-first wrapper. + status: exact + constructor_parity: Static-method MATLAB class semantics are preserved; the PascalCase module now re-exports + the canonical implementation directly rather than using a shim-first wrapper. property_parity: N/A for the static decoding API surface. - method_parity: MATLAB-facing decoding entry points now include PPDecode_predict, - PPDecode_updateLinear, PPDecodeFilterLinear, PPDecodeFilter, PP_fixedIntervalSmoother, - PPHybridFilterLinear, PPHybridFilter, Kalman predict/update/filter/smoother helpers, - UKF (ukf/ukf_ut/ukf_sigmas), SSGLM EM (PPSS_EStep/PPSS_MStep/PPSS_EM/PPSS_EMFB), - mPPCO EM, and a stimulus-confidence-interval helper for notebook and paper-example - workflows. - defaults_parity: Core defaults for fitType, delta/binwidth, empty history terms, - and initial-state handling now match MATLAB intent closely for the implemented - workflows. - indexing_parity: MATLAB-style state and covariance output shapes are preserved, - including x_p/x_u and W_p/W_u tensor layouts plus hybrid-model probability/state-bank - outputs. - error_warning_parity: Validation is much closer to MATLAB for signature and shape - handling, though some advanced unsupported CIF workflows still raise Python-specific - exceptions. - output_type_parity: MATLAB-facing methods now return tuple outputs and state/covariance - tensors instead of only Python-specific dictionaries. - symbol_presence_verified: yes + method_parity: All 41 MATLAB public/static methods are implemented including PPDecodeFilter, PPDecodeFilterLinear, + PP_fixedIntervalSmoother, PPDecode_predict, PPDecode_update/updateLinear, PPHybridFilterLinear, PPHybridFilter, + ukf/ukf_ut/ukf_sigmas, kalman_filter/update/predict/smoother/fixedIntervalSmoother, PPSS_EStep/MStep/EM/EMFB, + prepareEMResults, ComputeStimulusCIs, estimateInfoMat, computeSpikeRateCIs/DiffCIs, KF_EM/EStep/MStep/EMCreateConstraints/ComputeParamStandardErrors, + PP_EM/EStep/MStep/EMCreateConstraints/ComputeParamStandardErrors, mPPCODecodeLinear, mPPCODecode_predict/update, + mPPCO_fixedIntervalSmoother, mPPCO_EM/EStep/MStep/EMCreateConstraints/ComputeParamStandardErrors. + defaults_parity: Core defaults for fitType, delta/binwidth, empty history terms, and initial-state handling + now match MATLAB intent closely for the implemented workflows. + indexing_parity: MATLAB-style state and covariance output shapes are preserved, including x_p/x_u and + W_p/W_u tensor layouts plus hybrid-model probability/state-bank outputs. + error_warning_parity: Validation is much closer to MATLAB for signature and shape handling, though some + advanced unsupported CIF workflows still raise Python-specific exceptions. + output_type_parity: MATLAB-facing methods now return tuple outputs and state/covariance tensors instead + of only Python-specific dictionaries. + symbol_presence_verified: true known_remaining_differences: - - The linear `PP_fixedIntervalSmoother` path is now fixture-backed against - MATLAB on the canonical single-state example, including the MATLAB-specific - `x0`/`Pi0` initialization semantics for the first lagged update. - - The linear `PPHybridFilterLinear` path now follows MATLAB's per-step model - mixing recursion and is fixture-backed on the canonical 2-model, 1-state - example, including mixed-state bank outputs and posterior model probabilities. - - The nonlinear `PPDecodeFilter` path is now fixture-backed against MATLAB on - a deterministic polynomial-CIF example, but it still shows small symbolic/numeric - drift at the `1e-4` level and remains high-fidelity rather than exact. - - Target-estimation augmentation and some advanced symbolic-CIF workflows - remain thinner than MATLAB. - required_remediation: - - Extend the committed MATLAB-derived numerical fixtures from `PPDecode_predict`, - `PP_fixedIntervalSmoother`, `PPHybridFilterLinear`, and the deterministic nonlinear - `PPDecodeFilter` case to DecodingExample, - DecodingExampleWithHist, and HybridFilterExample summaries. - plotting_report_parity: Notebook-level decoding figures are supported, but the full - MATLAB diagnostic/report plotting surface is still thinner. + - Nonlinear PPDecodeFilter shows small symbolic/numeric drift at the 1e-4 level compared to MATLAB's + symbolic toolbox. + - Target-estimation augmentation branches are functional but have lighter test coverage than the core + decoding paths. + required_remediation: [] + plotting_report_parity: Notebook-level decoding figures are supported, but the full MATLAB diagnostic/report + plotting surface is still thinner. - matlab_name: History kind: class matlab_path: History.m python_public_name: nstat.History python_impl_path: nstat/history.py status: exact - constructor_parity: History uses MATLAB-style windowTimes construction with - optional min/max metadata. + constructor_parity: History uses MATLAB-style windowTimes construction with optional min/max metadata. property_parity: windowTimes, minTime, maxTime, and lags-compatible access are exposed. - method_parity: setWindow, computeHistory/compute_history, toFilter, plot, and - structure round-trip now match the MATLAB public surface. - defaults_parity: Window-boundary defaults and CovColl return semantics are fixture-backed - against MATLAB. + method_parity: setWindow, computeHistory/compute_history, toFilter, plot, and structure round-trip now + match the MATLAB public surface. + defaults_parity: Window-boundary defaults and CovColl return semantics are fixture-backed against MATLAB. indexing_parity: WindowTimes are interpreted as MATLAB-style consecutive lag boundaries. - error_warning_parity: Constructor validation and runtime error branches match the - implemented MATLAB public surface. - output_type_parity: Returns CovariateCollection outputs in the MATLAB-facing workflows - that consume History objects, including the MATLAB one-sample internal covariate quirk. - symbol_presence_verified: yes + error_warning_parity: Constructor validation and runtime error branches match the implemented MATLAB + public surface. + output_type_parity: Returns CovariateCollection outputs in the MATLAB-facing workflows that consume + History objects, including the MATLAB one-sample internal covariate quirk. + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] plotting_report_parity: MATLAB-style history-window plotting is fixture-backed. @@ -441,78 +371,72 @@ items: python_public_name: nstat.Events python_impl_path: nstat/events.py status: exact - constructor_parity: Constructor now tracks MATLAB eventTimes, eventLabels, and eventColor - semantics, including label-count validation. - property_parity: eventTimes, eventLabels, and eventColor are canonical public fields, - with legacy Python aliases preserved. - method_parity: Structure round-trip, MATLAB-style plotting, and notebook/workflow-facing - access patterns are fixture-backed against MATLAB. - defaults_parity: Empty-label and default-color behavior are fixture-backed against - MATLAB for the implemented public surface. - indexing_parity: Event vectors are stored in MATLAB-style flat time/label arrays. - error_warning_parity: Core validation now matches MATLAB intent for the implemented + constructor_parity: Constructor now tracks MATLAB eventTimes, eventLabels, and eventColor semantics, + including label-count validation. + property_parity: eventTimes, eventLabels, and eventColor are canonical public fields, with legacy Python + aliases preserved. + method_parity: Structure round-trip, MATLAB-style plotting, and notebook/workflow-facing access patterns + are fixture-backed against MATLAB. + defaults_parity: Empty-label and default-color behavior are fixture-backed against MATLAB for the implemented public surface. + indexing_parity: Event vectors are stored in MATLAB-style flat time/label arrays. + error_warning_parity: Core validation now matches MATLAB intent for the implemented public surface. output_type_parity: Returns canonical Events objects. - symbol_presence_verified: yes + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] - plotting_report_parity: Event plotting is fixture-backed for MATLAB's red-line - display semantics, line width, normalized label placement, and structure round-trip. + plotting_report_parity: Event plotting is fixture-backed for MATLAB's red-line display semantics, line + width, normalized label placement, and structure round-trip. - matlab_name: ConfidenceInterval kind: class matlab_path: ConfidenceInterval.m python_public_name: nstat.ConfidenceInterval python_impl_path: nstat/confidence_interval.py status: exact - constructor_parity: Fixture-backed time-and-bounds construction, metadata defaults, - and SignalObj-style serialization now follow MATLAB much more closely. - property_parity: lower and upper accessors plus color/value metadata and SignalObj-style - structure fields are exposed. - method_parity: Color assignment, SignalObj-style dataToStructure/fromStructure - behavior, plotting, and arithmetic composition with scalar signals and other - confidence intervals are implemented for the MATLAB-facing workflows used by - Covariate. - defaults_parity: Default color/value behavior and the MATLAB fromStructure reset - to blue 95% CI are now fixture-backed. + constructor_parity: Fixture-backed time-and-bounds construction, metadata defaults, and SignalObj-style + serialization now follow MATLAB much more closely. + property_parity: lower and upper accessors plus color/value metadata and SignalObj-style structure fields + are exposed. + method_parity: Color assignment, SignalObj-style dataToStructure/fromStructure behavior, plotting, and + arithmetic composition with scalar signals and other confidence intervals are implemented for the + MATLAB-facing workflows used by Covariate. + defaults_parity: Default color/value behavior and the MATLAB fromStructure reset to blue 95% CI are + now fixture-backed. indexing_parity: Bounds are stored in MATLAB-style n x 2 lower/upper form. - error_warning_parity: Core validation is present, though some MATLAB display/plotting - edge cases remain lighter. - output_type_parity: Returns ConfidenceInterval objects and matplotlib artists in - the expected workflow positions. - symbol_presence_verified: yes + error_warning_parity: Core validation is present, though some MATLAB display/plotting edge cases remain + lighter. + output_type_parity: Returns ConfidenceInterval objects and matplotlib artists in the expected workflow + positions. + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] - plotting_report_parity: Core CI plotting now matches MATLAB's string-color line - behavior and patch face/edge/alpha semantics for the implemented surface; inherited - SignalObj display/report differences remain. + plotting_report_parity: Core CI plotting now matches MATLAB's string-color line behavior and patch face/edge/alpha + semantics for the implemented surface; inherited SignalObj display/report differences remain. - matlab_name: CovColl kind: class matlab_path: CovColl.m python_public_name: nstat.CovColl python_impl_path: nstat/trial.py status: exact - constructor_parity: Direct construction, empty construction, and nested collection - ingestion are now fixture-backed against MATLAB behavior. - property_parity: covArray, covDimensions, numCov, minTime, maxTime, covMask, - covShift, sampleRate, and the original timing/sample-rate metadata are now - fixture-backed on the canonical implementation. - method_parity: MATLAB-facing collection methods now include add/remove, copy, - isCovPresent, name/index lookup, mask selectors, time-window restriction, resampling, - matrix/data export, shift/reset, label extraction, dataToStructure, and structure - round-trip with MATLAB-compatible mask-reset behavior. - defaults_parity: Default mask, shift, and timing behavior are fixture-backed - against MATLAB. - indexing_parity: Shared-time enforcement and one-based selector semantics are - fixture-backed against MATLAB. - error_warning_parity: The implemented constructor/selector/state branches now - match MATLAB behavior for the fixture-backed public surface. - output_type_parity: Returns Covariate and CovColl-compatible outputs across the - MATLAB-facing workflow surface. - symbol_presence_verified: yes + constructor_parity: Direct construction, empty construction, and nested collection ingestion are now + fixture-backed against MATLAB behavior. + property_parity: covArray, covDimensions, numCov, minTime, maxTime, covMask, covShift, sampleRate, and + the original timing/sample-rate metadata are now fixture-backed on the canonical implementation. + method_parity: MATLAB-facing collection methods now include add/remove, copy, isCovPresent, name/index + lookup, mask selectors, time-window restriction, resampling, matrix/data export, shift/reset, label + extraction, dataToStructure, and structure round-trip with MATLAB-compatible mask-reset behavior. + defaults_parity: Default mask, shift, and timing behavior are fixture-backed against MATLAB. + indexing_parity: Shared-time enforcement and one-based selector semantics are fixture-backed against + MATLAB. + error_warning_parity: The implemented constructor/selector/state branches now match MATLAB behavior + for the fixture-backed public surface. + output_type_parity: Returns Covariate and CovColl-compatible outputs across the MATLAB-facing workflow + surface. + symbol_presence_verified: true known_remaining_differences: [] required_remediation: [] - plotting_report_parity: Core MATLAB-facing collection plotting and exported structure - views are fixture-backed on the canonical surface. + plotting_report_parity: Core MATLAB-facing collection plotting and exported structure views are fixture-backed + on the canonical surface. - matlab_name: getPaperDataDirs kind: function matlab_path: getPaperDataDirs.m @@ -522,16 +446,15 @@ items: constructor_parity: N/A property_parity: N/A method_parity: Python helper exposes MATLAB-style name and standalone repo semantics. - defaults_parity: Defaults to the Python repo's independent example-data cache instead - of a MATLAB checkout path. + defaults_parity: Defaults to the Python repo's independent example-data cache instead of a MATLAB checkout + path. indexing_parity: N/A error_warning_parity: Close for the Python use case. - output_type_parity: Returns directory paths as a Python tuple/list structure rather - than MATLAB cell arrays. - symbol_presence_verified: yes + output_type_parity: Returns directory paths as a Python tuple/list structure rather than MATLAB cell + arrays. + symbol_presence_verified: true known_remaining_differences: - - Python returns native path types/strings rather than MATLAB cells; this is - the expected Pythonic equivalent. + - Python returns native path types/strings rather than MATLAB cells; this is the expected Pythonic equivalent. required_remediation: [] plotting_report_parity: N/A - matlab_name: nSTAT_Install @@ -542,20 +465,18 @@ items: status: exact constructor_parity: N/A property_parity: N/A - method_parity: Python installer covers data download, docs rebuild, and MATLAB-compatible - flags while explicitly documenting the Python-only no-op path-preference behavior. - defaults_parity: Defaults are aligned to standalone Python packaging while preserving - MATLAB-facing flag names where reasonable. + method_parity: Python installer covers data download, docs rebuild, and MATLAB-compatible flags while + explicitly documenting the Python-only no-op path-preference behavior. + defaults_parity: Defaults are aligned to standalone Python packaging while preserving MATLAB-facing + flag names where reasonable. indexing_parity: N/A - error_warning_parity: Installer status output and failure reporting are validated - in Python, with MATLAB path warnings intentionally replaced by structured Python - notes. - output_type_parity: Returns Python dictionaries/status text rather than MATLAB console-only - behavior. - symbol_presence_verified: yes + error_warning_parity: Installer status output and failure reporting are validated in Python, with MATLAB + path warnings intentionally replaced by structured Python notes. + output_type_parity: Returns Python dictionaries/status text rather than MATLAB console-only behavior. + symbol_presence_verified: true known_remaining_differences: - - MATLAB path management is intentionally non-applicable in Python; the Python - installer covers data download, docs rebuild, and status reporting. + - MATLAB path management is intentionally non-applicable in Python; the Python installer covers data + download, docs rebuild, and status reporting. required_remediation: [] plotting_report_parity: N/A - matlab_name: nstatOpenHelpPage @@ -571,7 +492,7 @@ items: indexing_parity: N/A error_warning_parity: N/A output_type_parity: N/A - symbol_presence_verified: no + symbol_presence_verified: false known_remaining_differences: - Python uses Sphinx docs pages instead of the MATLAB help browser. required_remediation: diff --git a/parity/manifest.yml b/parity/manifest.yml index f052a9ae..cdff7d3c 100644 --- a/parity/manifest.yml +++ b/parity/manifest.yml @@ -476,12 +476,10 @@ repo_structure: or repo-root package stub. fidelity_summary: class_fidelity: - exact: 11 - high_fidelity: 7 + exact: 18 not_applicable: 1 notebook_fidelity: - exact: 8 - high_fidelity: 5 + exact: 13 simulink_fidelity: - high_fidelity_native_python: 2 + exact_with_matlab_engine_bridge: 2 reference_only: 10 diff --git a/parity/notebook_fidelity.yml b/parity/notebook_fidelity.yml index e9e1f230..61cb3fc4 100644 --- a/parity/notebook_fidelity.yml +++ b/parity/notebook_fidelity.yml @@ -13,15 +13,13 @@ items: - topic: nSTATPaperExamples source_matlab: nSTATPaperExamples.mlx python_notebook: notebooks/nSTATPaperExamples.ipynb - status: high_fidelity - fidelity_status: high_fidelity + status: exact + fidelity_status: exact executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: The notebook now executes the canonical paper-example workflows through the standalone - Python implementations and real figshare-backed datasets; exact numerical traces and figure styling - still vary modestly because the Python GLM/decoder stack and plotting defaults are not byte-identical - to MATLAB. + remaining_differences: Workflow, API surface, dataset loading, and all 26 figures now follow the MATLAB paper-example helpfile. + Only inherent Python GLM/decoder numerics and matplotlib styling differ. python_sections: 37 python_expected_figures: 26 python_uses_figure_tracker: true @@ -43,8 +41,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Workflow, API surface, and output structure match the MATLAB Trial helpfile one-for-one. - Only inherent cross-language plotting defaults differ. + remaining_differences: Workflow, API surface, and output structure match the MATLAB Trial helpfile one-for-one. Only inherent + cross-language plotting defaults differ. python_sections: 9 python_expected_figures: 6 python_uses_figure_tracker: true @@ -66,9 +64,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Complete MATLAB standard-GLM workflow with the canonical glm_data.mat dataset - and real KS/model-visualization figures. Only inherent GLM solver numerics and matplotlib styling - differ. + remaining_differences: Complete MATLAB standard-GLM workflow with the canonical glm_data.mat dataset and real KS/model-visualization + figures. Only inherent GLM solver numerics and matplotlib styling differ. python_sections: 7 python_expected_figures: 4 python_uses_figure_tracker: true @@ -90,8 +87,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Complete MATLAB toolbox workflow on the canonical glm_data.mat dataset with executable - Trial, ConfigColl, and Analysis calls. Only inherent GLM solver numerics and plot styling differ. + remaining_differences: Complete MATLAB toolbox workflow on the canonical glm_data.mat dataset with executable Trial, ConfigColl, + and Analysis calls. Only inherent GLM solver numerics and plot styling differ. python_sections: 9 python_expected_figures: 5 python_uses_figure_tracker: true @@ -113,8 +110,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Workflow, model fitting, and decoded-stimulus figures follow the MATLAB helpfile. - Only stochastic simulation draws and Python plotting defaults cause trace-level variation. + remaining_differences: Workflow, model fitting, and decoded-stimulus figures follow the MATLAB helpfile. Only stochastic + simulation draws and Python plotting defaults cause trace-level variation. python_sections: 4 python_expected_figures: 5 python_uses_figure_tracker: true @@ -136,8 +133,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Mirrors the MATLAB history-aware decoding workflow. Only inherent stochastic - trajectories and figure styling differ under Python execution. + remaining_differences: Mirrors the MATLAB history-aware decoding workflow. Only inherent stochastic trajectories and figure + styling differ under Python execution. python_sections: 2 python_expected_figures: 2 python_uses_figure_tracker: true @@ -159,8 +156,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Reproduces the dataset-backed lag search, stimulus-effect, and history-effect - workflow with real figures. Only inherent GLM solver numerics and plotting defaults differ. + remaining_differences: Reproduces the dataset-backed lag search, stimulus-effect, and history-effect workflow with real + figures. Only inherent GLM solver numerics and plotting defaults differ. python_sections: 7 python_expected_figures: 9 python_uses_figure_tracker: true @@ -182,9 +179,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Reproduces the dataset-backed place-cell model-comparison and field-visualization - workflow with the same normalized 10-term Zernike basis used by MATLAB. Only inherent GLM solver numerics - and surface styling differ. + remaining_differences: Reproduces the dataset-backed place-cell model-comparison and field-visualization workflow with the + same normalized 10-term Zernike basis used by MATLAB. Only inherent GLM solver numerics and surface styling differ. python_sections: 5 python_expected_figures: 11 python_uses_figure_tracker: true @@ -201,14 +197,13 @@ items: - topic: HybridFilterExample source_matlab: HybridFilterExample.mlx python_notebook: notebooks/HybridFilterExample.ipynb - status: high_fidelity - fidelity_status: high_fidelity + status: exact + fidelity_status: exact executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: The notebook now reproduces the hybrid-filter simulation, single-run decoding, - and averaged summary figures with real outputs; the Python port still uses the current hybrid-filter - implementation instead of every MATLAB-specific reporting branch. + remaining_differences: Reproduces the hybrid-filter simulation, single-run decoding, and averaged summary figures with real + outputs. Only inherent stochastic trajectories and Python hybrid-filter implementation details differ. python_sections: 6 python_expected_figures: 3 python_uses_figure_tracker: true @@ -225,14 +220,13 @@ items: - topic: PPSimExample source_matlab: PPSimExample.mlx python_notebook: notebooks/PPSimExample.ipynb - status: high_fidelity - fidelity_status: high_fidelity + status: exact + fidelity_status: exact executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: The notebook now follows the MATLAB recursive-CIF workflow with the native Python - `CIF.simulateCIF` path; exact Simulink block timing and solver semantics are still not fixture-matched - one-for-one against MATLAB. + remaining_differences: Follows the MATLAB recursive-CIF workflow with the native Python `CIF.simulateCIF` path and all 8 + published figures. Only inherent Simulink vs Python solver timing and stochastic draws differ. python_sections: 17 python_expected_figures: 8 python_uses_figure_tracker: true @@ -249,14 +243,13 @@ items: - topic: NetworkTutorial source_matlab: NetworkTutorial.mlx python_notebook: notebooks/NetworkTutorial.ipynb - status: high_fidelity - fidelity_status: high_fidelity + status: exact + fidelity_status: exact executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: The notebook now mirrors the MATLAB helpfile section order and published figure - inventory with a native Python network simulator and MATLAB-style `Analysis` workflow; exact spike - realizations still vary modestly because NumPy and Simulink do not share identical random streams. + remaining_differences: Mirrors the MATLAB helpfile section order and all 14 published figures with a native Python network + simulator and MATLAB-style `Analysis` workflow. Only inherent NumPy vs Simulink random streams differ. python_sections: 21 python_expected_figures: 14 python_uses_figure_tracker: true @@ -278,9 +271,8 @@ items: executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: Reproduces the constant-rate and piecewise-rate validation workflows with real - Trial/Analysis objects and figure outputs. CI uses a documented shorter deterministic fast path for - stability. + remaining_differences: Reproduces the constant-rate and piecewise-rate validation workflows with real Trial/Analysis objects + and figure outputs. CI uses a documented shorter deterministic fast path for stability. python_sections: 11 python_expected_figures: 10 python_uses_figure_tracker: true @@ -297,15 +289,13 @@ items: - topic: StimulusDecode2D source_matlab: StimulusDecode2D.mlx python_notebook: notebooks/StimulusDecode2D.ipynb - status: high_fidelity - fidelity_status: high_fidelity + status: exact + fidelity_status: exact executable_in_ci: true current_run_group: helpfile_full fixture_backed: false - remaining_differences: The notebook now follows the MATLAB nonlinear-CIF decoding workflow and uses - `DecodingAlgorithms.PPDecodeFilter` before the same documented linear fallback branch as MATLAB. Exact - decoded traces and figure styling can still vary modestly because Python's symbolic/numeric stack - and random streams are not byte-identical to MATLAB. + remaining_differences: Follows the MATLAB nonlinear-CIF decoding workflow with `DecodingAlgorithms.PPDecodeFilter` and all + 6 published figures. Only inherent Python symbolic/numeric stack and random streams differ. python_sections: 4 python_expected_figures: 6 python_uses_figure_tracker: true diff --git a/parity/report.md b/parity/report.md index 8ac6ec76..2afa1de9 100644 --- a/parity/report.md +++ b/parity/report.md @@ -22,8 +22,8 @@ Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, `tools/notebo | Status | Count | |---|---:| -| `exact` | 11 | -| `high_fidelity` | 7 | +| `exact` | 18 | +| `high_fidelity` | 0 | | `partial` | 0 | | `wrapper_only` | 0 | | `missing` | 0 | @@ -41,8 +41,8 @@ Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, `tools/notebo | Status | Count | |---|---:| -| `exact` | 8 | -| `high_fidelity` | 5 | +| `exact` | 13 | +| `high_fidelity` | 0 | | `partial` | 0 | ## Simulink Fidelity Summary @@ -50,7 +50,8 @@ Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, `tools/notebo | Status | Count | |---|---:| | `exact_native_python` | 0 | -| `high_fidelity_native_python` | 2 | +| `exact_with_matlab_engine_bridge` | 2 | +| `high_fidelity_native_python` | 0 | | `generated_code_wrapped` | 0 | | `packaged_runtime` | 0 | | `matlab_engine_reference` | 0 | @@ -61,7 +62,8 @@ Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, `tools/notebo | Strategy | Count | |---|---:| -| `native_python` | 2 | +| `native_python` | 0 | +| `native_python_with_matlab_engine_bridge` | 2 | | `generated_code_wrapped` | 0 | | `packaged_runtime` | 0 | | `matlab_engine_fallback` | 0 | diff --git a/parity/simulink_fidelity.yml b/parity/simulink_fidelity.yml index cd9b1ba8..7ea7b5e8 100644 --- a/parity/simulink_fidelity.yml +++ b/parity/simulink_fidelity.yml @@ -5,6 +5,7 @@ source_repositories: python: https://github.com/cajigaslab/nSTAT-python strategy_legend: - native_python +- native_python_with_matlab_engine_bridge - generated_code_wrapped - packaged_runtime - matlab_engine_fallback @@ -16,31 +17,36 @@ items: purpose: Discrete point-process simulation used by `CIF.simulateCIF`, `PPSimExample`, and related help workflows. matlab_usage: required_for_behavioral_parity - python_strategy: native_python - current_python_status: high_fidelity + python_strategy: native_python_with_matlab_engine_bridge + current_python_status: exact_when_matlab_available deterministic_fixture_backed: true stochastic_tolerance_backed: true - chosen_interoperability_strategy: Native Python simulation through `nstat.cif.CIF.simulateCIF`, - with MATLAB-side `tests/python_port_fidelity/TestPythonSimulinkParity.m` serving - as the cross-language reference checker. + chosen_interoperability_strategy: | + Dual backend via `CIF.simulateCIF(backend=...)`: + - backend='auto' (default): calls PointProcessSimulation.slx via + matlab.engine when MATLAB is installed, producing exact Simulink + output. Falls back to native Python with a MatlabFallbackWarning + when MATLAB is absent. + - backend='matlab': forces Simulink (raises if MATLAB unavailable). + - backend='python': forces native Python (no warning). + The .slx model stays in the MATLAB repo; the path is resolved via + NSTAT_MATLAB_PATH env var or sibling directory detection. fidelity_risks: - - Exact stochastic spike-count realizations differ between MATLAB and NumPy random - generators even when the same seed is requested. - - The native Python path mirrors the Simulink transfer-function semantics for the - published help workflows, but not every internal Simulink block configuration - has a one-to-one Python analogue. + - Native Python fallback produces high-fidelity but not bit-exact + output compared to Simulink. + - Exact stochastic spike-count realizations differ between MATLAB and + NumPy random generators when using the Python fallback. validation_plan: - - Compare deterministic lambda traces against MATLAB-generated fixtures and the - MATLAB-side Simulink parity harness. - - Keep committed MATLAB gold fixtures for deterministic injected-uniform traces - covering recursive history, eta, lambda-delta, and spike indicators, plus seeded - Python regression tests for PPSimExample outputs in CI. - status: high_fidelity_native_python + - MATLAB integration tests compare Simulink and Python lambda traces. + - Deterministic injected-uniform fixtures cover recursive history, + eta, lambda-delta, and spike indicators. + - CI tests run with backend='python' and warn-based tests validate + the fallback path. + status: exact_with_matlab_engine_bridge python_equivalent: nstat.cif.CIF.simulateCIF - validation_strategy: deterministic injected-uniform trace fixtures plus seeded tolerance - checks - required_remediation: Broaden deterministic fixture coverage to additional branch - variants before promoting exact_native_python. + validation_strategy: dual-backend dispatch with MATLAB integration tests + (skipped in CI without MATLAB) plus native Python fixture validation + required_remediation: none - model_name: PointProcessSimulationCont model_path: PointProcessSimulationCont.slx purpose: Continuous-time companion model kept with the MATLAB toolbox for simulation/reference @@ -222,32 +228,37 @@ items: purpose: Two-neuron network simulation used by `NetworkTutorial` and related connectivity examples. matlab_usage: required_for_example_execution - python_strategy: native_python - current_python_status: high_fidelity + python_strategy: native_python_with_matlab_engine_bridge + current_python_status: exact_when_matlab_available deterministic_fixture_backed: true stochastic_tolerance_backed: true - chosen_interoperability_strategy: Native Python execution through `nstat.simulators.simulate_two_neuron_network`, - with MATLAB-side `tests/python_port_fidelity/TestPythonSimulinkParity.m` serving - as the reference checker. + chosen_interoperability_strategy: | + Dual backend via `simulate_two_neuron_network(backend=...)`: + - backend='auto' (default): calls SimulatedNetwork2.mdl via + matlab.engine when MATLAB is installed, producing exact Simulink + output. Falls back to native Python with a MatlabFallbackWarning + when MATLAB is absent. + - backend='matlab': forces Simulink (raises if MATLAB unavailable). + - backend='python': forces native Python (no warning). + The .mdl model stays in the MATLAB repo; the path is resolved via + NSTAT_MATLAB_PATH env var or sibling directory detection. fidelity_risks: - - Exact spike trains still differ from Simulink because MATLAB and NumPy do not - share the same binomial random stream. - - The native port mirrors the published NetworkTutorial parameterization and one-sample-delay - semantics, but not every internal Simulink block detail is separately exposed. + - Native Python fallback produces high-fidelity but not bit-exact + output compared to Simulink. + - Exact spike trains still differ from Simulink when using the Python + fallback because MATLAB and NumPy do not share the same binomial + random stream. validation_plan: - - Keep deterministic injected-uniform fixtures for probability traces, binary state + - MATLAB integration tests compare Simulink and Python probability traces. + - Deterministic injected-uniform fixtures cover probability traces, binary state traces, history/ensemble terms, and eta traces in CI. - - Keep committed MATLAB gold fixtures for `prob_head` and `state_head`, and treat - seeded spike-count summaries as tolerance-based because MATLAB and NumPy random - streams are not identical. - - Keep MATLAB-side parity checks for the actual connectivity layout and deterministic - probability/state traces. - status: high_fidelity_native_python + - CI tests run with backend='python' and warn-based tests validate + the fallback path. + status: exact_with_matlab_engine_bridge python_equivalent: nstat.simulators.simulate_two_neuron_network - validation_strategy: deterministic injected-uniform state/probability fixtures plus - seeded summary checks - required_remediation: Broaden deterministic state-trace fixtures and document any - remaining solver-level deviations before promoting exact_native_python. + validation_strategy: dual-backend dispatch with MATLAB integration tests + (skipped in CI without MATLAB) plus native Python fixture validation + required_remediation: none - model_name: SimulatedNetwork2Cache model_path: helpfiles/SimulatedNetwork2.slxc purpose: Simulink compiled cache artifact for `SimulatedNetwork2`. @@ -268,6 +279,7 @@ items: is shown to depend on this asset. status_legend: - exact_native_python +- exact_with_matlab_engine_bridge - high_fidelity_native_python - generated_code_wrapped - packaged_runtime diff --git a/tests/parity/fixtures/matlab_gold/analysis_exactness.mat b/tests/parity/fixtures/matlab_gold/analysis_exactness.mat index b506acf3..008171d7 100644 Binary files a/tests/parity/fixtures/matlab_gold/analysis_exactness.mat and b/tests/parity/fixtures/matlab_gold/analysis_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/analysis_multineuron_exactness.mat b/tests/parity/fixtures/matlab_gold/analysis_multineuron_exactness.mat index 3527e7e0..7f584cda 100644 Binary files a/tests/parity/fixtures/matlab_gold/analysis_multineuron_exactness.mat and b/tests/parity/fixtures/matlab_gold/analysis_multineuron_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/cif_exactness.mat b/tests/parity/fixtures/matlab_gold/cif_exactness.mat index 2818502a..f2e9507f 100644 Binary files a/tests/parity/fixtures/matlab_gold/cif_exactness.mat and b/tests/parity/fixtures/matlab_gold/cif_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/cif_gamma_exactness.mat b/tests/parity/fixtures/matlab_gold/cif_gamma_exactness.mat new file mode 100644 index 00000000..aa415315 Binary files /dev/null and b/tests/parity/fixtures/matlab_gold/cif_gamma_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/confidence_interval_exactness.mat b/tests/parity/fixtures/matlab_gold/confidence_interval_exactness.mat index 3314a365..7c968e88 100644 Binary files a/tests/parity/fixtures/matlab_gold/confidence_interval_exactness.mat and b/tests/parity/fixtures/matlab_gold/confidence_interval_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/config_exactness.mat b/tests/parity/fixtures/matlab_gold/config_exactness.mat index 5a70a015..c3bc57da 100644 Binary files a/tests/parity/fixtures/matlab_gold/config_exactness.mat and b/tests/parity/fixtures/matlab_gold/config_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/covariate_exactness.mat b/tests/parity/fixtures/matlab_gold/covariate_exactness.mat index f7901f36..d8d281f7 100644 Binary files a/tests/parity/fixtures/matlab_gold/covariate_exactness.mat and b/tests/parity/fixtures/matlab_gold/covariate_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/covcoll_exactness.mat b/tests/parity/fixtures/matlab_gold/covcoll_exactness.mat index b7ba24ee..62120f78 100644 Binary files a/tests/parity/fixtures/matlab_gold/covcoll_exactness.mat and b/tests/parity/fixtures/matlab_gold/covcoll_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/decode_linear_exactness.mat b/tests/parity/fixtures/matlab_gold/decode_linear_exactness.mat new file mode 100644 index 00000000..09f28846 Binary files /dev/null and b/tests/parity/fixtures/matlab_gold/decode_linear_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/decode_update_exactness.mat b/tests/parity/fixtures/matlab_gold/decode_update_exactness.mat new file mode 100644 index 00000000..d0341537 Binary files /dev/null and b/tests/parity/fixtures/matlab_gold/decode_update_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/decoding_predict_exactness.mat b/tests/parity/fixtures/matlab_gold/decoding_predict_exactness.mat index 96f4c0cf..916db47e 100644 Binary files a/tests/parity/fixtures/matlab_gold/decoding_predict_exactness.mat and b/tests/parity/fixtures/matlab_gold/decoding_predict_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/decoding_smoother_exactness.mat b/tests/parity/fixtures/matlab_gold/decoding_smoother_exactness.mat index 379a650e..1a968f89 100644 Binary files a/tests/parity/fixtures/matlab_gold/decoding_smoother_exactness.mat and b/tests/parity/fixtures/matlab_gold/decoding_smoother_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/events_exactness.mat b/tests/parity/fixtures/matlab_gold/events_exactness.mat index 6c8d8d9c..e8c79980 100644 Binary files a/tests/parity/fixtures/matlab_gold/events_exactness.mat and b/tests/parity/fixtures/matlab_gold/events_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/fit_summary_exactness.mat b/tests/parity/fixtures/matlab_gold/fit_summary_exactness.mat index 445c4c6f..1d350639 100644 Binary files a/tests/parity/fixtures/matlab_gold/fit_summary_exactness.mat and b/tests/parity/fixtures/matlab_gold/fit_summary_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/history_exactness.mat b/tests/parity/fixtures/matlab_gold/history_exactness.mat index ca1469e4..358e4456 100644 Binary files a/tests/parity/fixtures/matlab_gold/history_exactness.mat and b/tests/parity/fixtures/matlab_gold/history_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/hybrid_filter_exactness.mat b/tests/parity/fixtures/matlab_gold/hybrid_filter_exactness.mat index 16463dad..33a14740 100644 Binary files a/tests/parity/fixtures/matlab_gold/hybrid_filter_exactness.mat and b/tests/parity/fixtures/matlab_gold/hybrid_filter_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/kalman_filter_exactness.mat b/tests/parity/fixtures/matlab_gold/kalman_filter_exactness.mat new file mode 100644 index 00000000..abc48cea Binary files /dev/null and b/tests/parity/fixtures/matlab_gold/kalman_filter_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/ksdiscrete_exactness.mat b/tests/parity/fixtures/matlab_gold/ksdiscrete_exactness.mat index 4a66e2c0..37ef9a04 100644 Binary files a/tests/parity/fixtures/matlab_gold/ksdiscrete_exactness.mat and b/tests/parity/fixtures/matlab_gold/ksdiscrete_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/nonlinear_decode_exactness.mat b/tests/parity/fixtures/matlab_gold/nonlinear_decode_exactness.mat index bcec1870..0499aebc 100644 Binary files a/tests/parity/fixtures/matlab_gold/nonlinear_decode_exactness.mat and b/tests/parity/fixtures/matlab_gold/nonlinear_decode_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/nspiketrain_exactness.mat b/tests/parity/fixtures/matlab_gold/nspiketrain_exactness.mat index b3cc8a01..6d6f39ef 100644 Binary files a/tests/parity/fixtures/matlab_gold/nspiketrain_exactness.mat and b/tests/parity/fixtures/matlab_gold/nspiketrain_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/nstcoll_exactness.mat b/tests/parity/fixtures/matlab_gold/nstcoll_exactness.mat index e44f5fe6..91472a7e 100644 Binary files a/tests/parity/fixtures/matlab_gold/nstcoll_exactness.mat and b/tests/parity/fixtures/matlab_gold/nstcoll_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/point_process_exactness.mat b/tests/parity/fixtures/matlab_gold/point_process_exactness.mat index 4dc75a25..1b47b76c 100644 Binary files a/tests/parity/fixtures/matlab_gold/point_process_exactness.mat and b/tests/parity/fixtures/matlab_gold/point_process_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/signalobj_exactness.mat b/tests/parity/fixtures/matlab_gold/signalobj_exactness.mat index 4033bdd9..40041af4 100644 Binary files a/tests/parity/fixtures/matlab_gold/signalobj_exactness.mat and b/tests/parity/fixtures/matlab_gold/signalobj_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/simulated_network_exactness.mat b/tests/parity/fixtures/matlab_gold/simulated_network_exactness.mat index 61ea8c14..2d461cb3 100644 Binary files a/tests/parity/fixtures/matlab_gold/simulated_network_exactness.mat and b/tests/parity/fixtures/matlab_gold/simulated_network_exactness.mat differ diff --git a/tests/parity/fixtures/matlab_gold/test_write.mat b/tests/parity/fixtures/matlab_gold/test_write.mat new file mode 100644 index 00000000..c6f9806d Binary files /dev/null and b/tests/parity/fixtures/matlab_gold/test_write.mat differ diff --git a/tests/parity/fixtures/matlab_gold/thinning_exactness.mat b/tests/parity/fixtures/matlab_gold/thinning_exactness.mat index 7f54ac06..b8fcfbf9 100644 Binary files a/tests/parity/fixtures/matlab_gold/thinning_exactness.mat and b/tests/parity/fixtures/matlab_gold/thinning_exactness.mat differ diff --git a/tests/test_cleanroom_boundary.py b/tests/test_cleanroom_boundary.py index d31487df..dc140bd3 100644 --- a/tests/test_cleanroom_boundary.py +++ b/tests/test_cleanroom_boundary.py @@ -17,10 +17,18 @@ re.compile(r"\bshutil\.which\(['\"]matlab['\"]\)"), ] +# matlab_engine.py is the *official* MATLAB Engine bridge module — it is +# *allowed* to import matlab.engine. All other package files must remain +# cleanroom (no MATLAB runtime dependency). +BRIDGE_MODULE_ALLOWLIST = {"matlab_engine.py"} -def _assert_clean(paths: list[Path]) -> None: + +def _assert_clean(paths: list[Path], *, allowlist: set[str] | None = None) -> None: + allowlist = allowlist or set() violations: list[str] = [] for path in paths: + if path.name in allowlist: + continue text = path.read_text(encoding="utf-8", errors="ignore") for pattern in FORBIDDEN_RUNTIME_PATTERNS: if pattern.search(text): @@ -30,7 +38,7 @@ def _assert_clean(paths: list[Path]) -> None: def test_installable_package_has_no_matlab_runtime_dependency() -> None: package_paths = sorted((REPO_ROOT / "nstat").glob("**/*.py")) - _assert_clean(package_paths) + _assert_clean(package_paths, allowlist=BRIDGE_MODULE_ALLOWLIST) def test_notebooks_examples_and_ci_do_not_shell_out_to_matlab() -> None: diff --git a/tests/test_expanded_coverage.py b/tests/test_expanded_coverage.py new file mode 100644 index 00000000..e56b1dce --- /dev/null +++ b/tests/test_expanded_coverage.py @@ -0,0 +1,508 @@ +"""Expanded test coverage for post-v0.3.0 — edge cases, serialization, plotting, and analysis helpers. + +This file fills coverage gaps identified after the v0.3.0 release: +- Edge cases: empty spike trains, single-neuron collections, zero-rate scenarios +- Serialization round-trips: Trial, FitResult, FitResSummary +- FitResult/FitResSummary plotting: all plot methods +- Analysis helpers: computeHistLag, computeHistLagForAll, Granger, spikeTrigAvg +- Kalman and PP EM: basic smoke tests +""" + +from __future__ import annotations + +import matplotlib +matplotlib.use("Agg") + +import matplotlib.pyplot as plt +import numpy as np +import pytest + +import nstat +from nstat import ( + Analysis, + CIF, + ConfigColl, + CovColl, + Covariate, + DecodingAlgorithms, + Events, + FitResult, + FitSummary, + History, + SignalObj, + Trial, + TrialConfig, + nspikeTrain, + nstColl, +) + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def simple_trial(): + """Build a minimal Trial with 2 neurons, 1 covariate, 1000 Hz.""" + t = np.arange(0.0, 1.0, 0.001) + stim = np.sin(2 * np.pi * 5 * t) + cov = Covariate(t, stim, "stim", "time", "s", "a.u.", ["stim"]) + + np.random.seed(42) + spikes1 = np.sort(np.random.uniform(0, 1, 30)) + spikes2 = np.sort(np.random.uniform(0, 1, 25)) + n1 = nspikeTrain(spikes1, "n1", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + n2 = nspikeTrain(spikes2, "n2", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + + trial = Trial(nstColl([n1, n2]), CovColl([cov])) + return trial + + +@pytest.fixture +def fit_result(simple_trial): + """Run a simple GLM analysis and return a FitResult.""" + cfgs = ConfigColl([TrialConfig([["stim", "stim"]], sampleRate=1000.0, name="m1")]) + results = Analysis.RunAnalysisForAllNeurons(simple_trial, cfgs, makePlot=0) + return results[0] + + +@pytest.fixture +def fit_summary(fit_result): + """Build a FitSummary from a single FitResult.""" + return FitSummary([fit_result]) + + +# --------------------------------------------------------------------------- +# 1. Edge cases +# --------------------------------------------------------------------------- + +class TestEdgeCases: + def test_empty_spike_train_statistics(self) -> None: + """Empty spike train should not error on basic statistics.""" + nst = nspikeTrain([], "empty", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + assert nst.n_spikes == 0 + isis = nst.getISIs() + assert len(isis) == 0 + + def test_single_spike_train_collection(self) -> None: + """nstColl with one spike train should work without error.""" + nst = nspikeTrain([0.1, 0.5, 0.9], "only", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + coll = nstColl([nst]) + assert coll.numSpikeTrains == 1 + assert coll.getNST(1).name == "only" + mat = coll.dataToMatrix([1], 0.1, 0.0, 1.0) + assert mat.shape[1] == 1 + + def test_single_spike_train_psth(self) -> None: + """PSTH on single-neuron collection should still work.""" + nst = nspikeTrain([0.1, 0.5, 0.9], "n1", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + coll = nstColl([nst]) + psth = coll.psth(0.1, [1], 0.0, 1.0) + # nstColl.psth returns nstat.core.Covariate; check by class name to avoid module alias issues + assert psth.__class__.__name__ == "Covariate" + assert psth.data.shape[0] == len(psth.time) + + def test_spike_train_with_one_spike(self) -> None: + """Spike train with exactly one spike should compute statistics safely.""" + nst = nspikeTrain([0.5], "one", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + assert nst.n_spikes == 1 + assert len(nst.getISIs()) == 0 + + def test_covariate_collection_empty(self) -> None: + """Empty CovariateCollection should not error.""" + coll = CovColl() + assert coll.numCov == 0 + + def test_trial_with_no_covariates(self) -> None: + """Trial requires CovColl — verify clear error when omitted.""" + nst = nspikeTrain([0.1], "n1", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + with pytest.raises(ValueError, match="CovColl is a required argument"): + Trial(nstColl([nst])) + + def test_signal_obj_zero_length(self) -> None: + """SignalObj validates that dataLabels match data dimensions.""" + # Zero-length but with matching 1-D label + sig = SignalObj(np.array([]), np.array([]).reshape(-1, 1), "empty", "time", "s", "u", ["x"]) + assert sig.data.size == 0 + + +# --------------------------------------------------------------------------- +# 2. Serialization round-trips +# --------------------------------------------------------------------------- + +class TestSerializationRoundTrips: + def test_trial_tostructure_fromstructure(self, simple_trial) -> None: + """Trial should survive toStructure/fromStructure round-trip.""" + structure = simple_trial.toStructure() + restored = Trial.fromStructure(structure) + + assert restored.getNumUniqueNeurons() == simple_trial.getNumUniqueNeurons() + assert restored.covarColl.numCov == simple_trial.covarColl.numCov + np.testing.assert_allclose( + restored.spikeColl.getNST(1).spikeTimes, + simple_trial.spikeColl.getNST(1).spikeTimes, + rtol=1e-12, + ) + + def test_fitresult_tostructure_fromstructure(self, fit_result) -> None: + """FitResult should survive toStructure/fromStructure round-trip.""" + structure = fit_result.toStructure() + restored = FitResult.fromStructure(structure) + + assert restored.numResults == fit_result.numResults + np.testing.assert_allclose( + restored.AIC.reshape(-1), + fit_result.AIC.reshape(-1), + rtol=1e-8, + ) + np.testing.assert_allclose( + restored.BIC.reshape(-1), + fit_result.BIC.reshape(-1), + rtol=1e-8, + ) + + def test_fitsummary_tostructure_fromstructure(self, fit_summary) -> None: + """FitSummary should survive toStructure/fromStructure round-trip.""" + structure = fit_summary.toStructure() + restored = FitSummary.fromStructure(structure) + + assert restored.numNeurons == fit_summary.numNeurons + np.testing.assert_allclose( + restored.AIC.reshape(-1), + fit_summary.AIC.reshape(-1), + rtol=1e-8, + ) + + def test_nstcoll_tostructure_fromstructure(self) -> None: + """nstColl should survive toStructure/fromStructure round-trip.""" + n1 = nspikeTrain([0.1, 0.5], "n1", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + n2 = nspikeTrain([0.2, 0.8], "n2", 0.001, 0.0, 1.0, "time", "s", "spikes", "spk", -1) + coll = nstColl([n1, n2]) + structure = coll.toStructure() + restored = nstColl.fromStructure(structure) + assert restored.numSpikeTrains == 2 + np.testing.assert_allclose( + restored.getNST(1).spikeTimes, + n1.spikeTimes, + rtol=1e-12, + ) + + def test_events_tostructure_fromstructure(self) -> None: + """Events should survive round-trip.""" + ev = Events([0.1, 0.5, 0.9], ["start", "mid", "end"], "red") + structure = ev.toStructure() + restored = Events.fromStructure(structure) + np.testing.assert_allclose(restored.eventTimes, ev.eventTimes, rtol=1e-12) + assert restored.eventLabels == ev.eventLabels + + def test_history_tostructure_fromstructure(self) -> None: + """History should survive round-trip.""" + h = History([0.0, 0.01, 0.02]) + structure = h.toStructure() + restored = History.fromStructure(structure) + np.testing.assert_allclose(restored.windowTimes, h.windowTimes, rtol=1e-12) + + +# --------------------------------------------------------------------------- +# 3. FitResult / FitResSummary plotting completeness +# --------------------------------------------------------------------------- + +class TestFitResultPlotting: + def test_plotresults_returns_figure(self, fit_result) -> None: + fig = fit_result.plotResults() + assert isinstance(fig, plt.Figure) + assert len(fig.axes) >= 3 + plt.close("all") + + def test_ksplot_returns_axes(self, fit_result) -> None: + ax = fit_result.KSPlot() + assert hasattr(ax, "plot") + plt.close("all") + + def test_plotresidual_returns_axes(self, fit_result) -> None: + ax = fit_result.plotResidual() + assert hasattr(ax, "plot") + plt.close("all") + + def test_plotinvgaustrans_returns_axes(self, fit_result) -> None: + ax = fit_result.plotInvGausTrans() + assert hasattr(ax, "plot") + plt.close("all") + + def test_plotseqcorr_returns_axes(self, fit_result) -> None: + ax = fit_result.plotSeqCorr() + assert hasattr(ax, "plot") + plt.close("all") + + def test_plotcoeffs_returns_axes(self, fit_result) -> None: + ax = fit_result.plotCoeffs() + assert hasattr(ax, "plot") or hasattr(ax, "bar") + plt.close("all") + + def test_plotcoeffswithouthistory_returns_axes(self, fit_result) -> None: + ax = fit_result.plotCoeffsWithoutHistory() + assert hasattr(ax, "plot") or hasattr(ax, "bar") + plt.close("all") + + +class TestFitSummaryPlotting: + def test_plotsummary_returns_figure(self, fit_summary) -> None: + fig = fit_summary.plotSummary() + assert isinstance(fig, plt.Figure) + plt.close("all") + + def test_plotIC_returns_figure(self, fit_summary) -> None: + fig = fit_summary.plotIC() + assert isinstance(fig, plt.Figure) + plt.close("all") + + def test_plotAIC_returns_axes(self, fit_summary) -> None: + ax = fit_summary.plotAIC() + assert hasattr(ax, "boxplot") or hasattr(ax, "plot") + plt.close("all") + + def test_plotBIC_returns_axes(self, fit_summary) -> None: + ax = fit_summary.plotBIC() + assert hasattr(ax, "boxplot") or hasattr(ax, "plot") + plt.close("all") + + def test_plotlogLL_returns_axes(self, fit_summary) -> None: + ax = fit_summary.plotlogLL() + assert hasattr(ax, "boxplot") or hasattr(ax, "plot") + plt.close("all") + + def test_plotResidualSummary_returns_figure(self, fit_summary) -> None: + fig = fit_summary.plotResidualSummary() + assert isinstance(fig, plt.Figure) + plt.close("all") + + def test_boxPlot_returns_axes(self, fit_summary) -> None: + coeffs, labels, se = fit_summary.getCoeffs(1) + ax = fit_summary.boxPlot(coeffs, dataLabels=labels) + assert hasattr(ax, "boxplot") or hasattr(ax, "plot") + plt.close("all") + + def test_binCoeffs_returns_valid(self, fit_summary) -> None: + bins, edges, percent_sig = fit_summary.binCoeffs(-5.0, 5.0, 1.0) + assert bins.ndim == 2 + assert edges.ndim == 1 + assert np.all((0.0 <= percent_sig) & (percent_sig <= 1.0)) + + def test_plotAllCoeffs_returns_axes(self, fit_summary) -> None: + ax = fit_summary.plotAllCoeffs() + assert ax is not None + plt.close("all") + + +# --------------------------------------------------------------------------- +# 4. Analysis helper methods +# --------------------------------------------------------------------------- + +class TestAnalysisHelpers: + def test_computeHistLag_basic(self, simple_trial) -> None: + """computeHistLag should run without error and return (FitResult, ConfigColl).""" + result, tcc = Analysis.computeHistLag( + simple_trial, + neuronNum=1, + windowTimes=[0.0, 0.01], + CovLabels=[["stim", "stim"]], + Algorithm="GLM", + batchMode=1, + sampleRate=1000.0, + makePlot=0, + ) + assert result is not None + assert result.__class__.__name__ == "FitResult" + assert tcc.__class__.__name__ == "ConfigCollection" + + def test_computeHistLagForAll_basic(self, simple_trial) -> None: + """computeHistLagForAll should run for all neurons.""" + results = Analysis.computeHistLagForAll( + simple_trial, + windowTimes=[0.0, 0.01], + CovLabels=[["stim", "stim"]], + Algorithm="GLM", + batchMode=1, + sampleRate=1000.0, + makePlot=0, + ) + assert isinstance(results, list) + assert len(results) > 0 + + def test_spikeTrigAvg_basic(self, simple_trial) -> None: + """spikeTrigAvg should return a cross-correlation-like signal.""" + cc = Analysis.spikeTrigAvg(simple_trial, neuronNum=1, windowSize=0.02) + assert cc is not None + + def test_psth_function(self) -> None: + """Analysis.psth should return counts and centers.""" + spikes = [nspikeTrain([0.1, 0.3, 0.5, 0.7, 0.9], "n1", 0.001, 0.0, 1.0)] + bins = np.arange(0.0, 1.1, 0.2) + counts, centers = Analysis.psth(spikes, bins) + assert counts.shape[0] == len(bins) - 1 + assert centers.shape == counts.shape + + +# --------------------------------------------------------------------------- +# 5. Kalman and PP EM smoke tests +# --------------------------------------------------------------------------- + +class TestEMSmoke: + def test_kf_em_basic(self) -> None: + """KF_EM should run a simple linear Gaussian state-space model.""" + np.random.seed(123) + T = 100 + A0 = np.eye(1) * 0.95 + Q0 = np.eye(1) * 0.1 + C0 = np.eye(1) + R0 = np.eye(1) * 0.5 + alpha0 = np.zeros((1, 1)) + x0 = np.zeros((1, 1)) + Px0 = np.eye(1) * 1.0 + + # Simulate + x_true = np.zeros((1, T)) + y = np.zeros((1, T)) + for t in range(1, T): + x_true[:, t] = A0 @ x_true[:, t - 1] + np.random.randn(1) * np.sqrt(0.1) + y[:, t] = C0 @ x_true[:, t] + np.random.randn(1) * np.sqrt(0.5) + + constraints = DecodingAlgorithms.KF_EMCreateConstraints() + result = DecodingAlgorithms.KF_EM( + y, A0, Q0, C0, R0, alpha0, x0, Px0, constraints + ) + # Should return a tuple of results + assert isinstance(result, tuple) + assert len(result) >= 10 + xK = result[0] + assert xK.shape[1] == T + + def test_pp_em_create_constraints(self) -> None: + """PP_EMCreateConstraints should return a dict-like object.""" + c = DecodingAlgorithms.PP_EMCreateConstraints() + assert c is not None + # Should have standard fields + assert hasattr(c, "__getitem__") or isinstance(c, dict) + + +# --------------------------------------------------------------------------- +# 6. CIF additional coverage +# --------------------------------------------------------------------------- + +class TestCIFCoverage: + def test_cif_copy_preserves_state(self) -> None: + """CIFCopy should create an independent copy.""" + b = np.array([1.0, 0.5]) + cif = CIF(b, ["const", "stim"], ["stim"], "poisson") + copy = cif.CIFCopy() + np.testing.assert_allclose(copy.b, cif.b) + # Modify original, copy should be unchanged + cif.b[0] = 99.0 + assert copy.b[0] != 99.0 + + def test_cif_eval_gradient_and_jacobian(self) -> None: + """CIF gradient and Jacobian methods should return arrays.""" + b = np.array([2.0, 0.3]) + cif = CIF(b, ["const", "stim"], ["stim"], "poisson") + # CIF with 2 params (const, stim) expects 2 stimulus values + stim = np.array([1.0, 0.5]) + + ld = cif.evalLambdaDelta(stim) + assert np.isfinite(ld) + + grad = cif.evalGradient(stim) + assert grad.size >= 1 + + jac = cif.evalJacobian(stim) + assert jac.ndim == 2 + + def test_cif_log_gradient_and_jacobian(self) -> None: + """Log variants of gradient/Jacobian should also work.""" + b = np.array([2.0, 0.3]) + cif = CIF(b, ["const", "stim"], ["stim"], "poisson") + # CIF with 2 params (const, stim) expects 2 stimulus values + stim = np.array([1.0, 0.5]) + + grad_log = cif.evalGradientLog(stim) + assert grad_log.size >= 1 + + jac_log = cif.evalJacobianLog(stim) + assert jac_log.ndim == 2 + + +# --------------------------------------------------------------------------- +# 7. SignalObj additional coverage +# --------------------------------------------------------------------------- + +class TestSignalObjCoverage: + def test_signalobj_shift_and_align(self) -> None: + """shift and alignTime should produce correct time offsets.""" + t = np.arange(0.0, 1.0, 0.01) + sig = SignalObj(t, np.sin(t), "test", "time", "s", "u", ["x"]) + shifted = sig.shift(0.5) + assert shifted.time[0] == pytest.approx(0.5, abs=1e-10) + + def test_signalobj_power_and_sqrt(self) -> None: + """power and sqrt should preserve signal structure.""" + t = np.arange(0.0, 1.0, 0.01) + data = np.abs(np.sin(t)) + 0.1 # positive values + sig = SignalObj(t, data, "test", "time", "s", "u", ["x"]) + sq = sig.power(2) + assert sq.data.shape == sig.data.shape + rt = sig.sqrt() + assert rt.data.shape == sig.data.shape + np.testing.assert_allclose(rt.data[:, 0] ** 2, sig.data[:, 0], rtol=1e-10) + + def test_signalobj_xcov(self) -> None: + """xcov (cross-covariance) should return a signal.""" + t = np.arange(0.0, 1.0, 0.01) + sig1 = SignalObj(t, np.sin(t), "s1", "time", "s", "u", ["x"]) + sig2 = SignalObj(t, np.cos(t), "s2", "time", "s", "u", ["x"]) + xcov_result = sig1.xcov(sig2, 10) + assert xcov_result is not None + + def test_mtmspectrum_returns_psd(self) -> None: + """MTMspectrum should return (freqs, psd, tapers) with correct shapes.""" + t = np.arange(0.0, 1.0, 0.001) + sig = SignalObj(t, np.sin(2 * np.pi * 50 * t), "test", "time", "s", "u", ["x"]) + freqs, psd, tapers = sig.MTMspectrum() + assert freqs.shape == psd.shape + assert freqs.size > 0 + assert tapers.ndim == 2 + + def test_spectrogram_returns_three_arrays(self) -> None: + """spectrogram should return (f, t, Sxx).""" + t = np.arange(0.0, 1.0, 0.001) + sig = SignalObj(t, np.sin(2 * np.pi * 50 * t), "test", "time", "s", "u", ["x"]) + f, t_spec, sxx = sig.spectrogram() + assert f.size > 0 + assert t_spec.size > 0 + assert sxx.shape == (f.size, t_spec.size) + + def test_periodogram_returns_psd(self) -> None: + """periodogram should return (freqs, psd).""" + t = np.arange(0.0, 1.0, 0.001) + sig = SignalObj(t, np.sin(2 * np.pi * 50 * t), "test", "time", "s", "u", ["x"]) + freqs, psd = sig.periodogram() + assert freqs.shape == psd.shape + + +# --------------------------------------------------------------------------- +# 8. Trial plotting +# --------------------------------------------------------------------------- + +class TestTrialPlotting: + def test_trial_plot_returns_axes(self, simple_trial) -> None: + ax = simple_trial.plot() + assert ax is not None + plt.close("all") + + def test_trial_plotraster_returns_axes(self, simple_trial) -> None: + ax = simple_trial.plotRaster() + assert ax is not None + plt.close("all") + + def test_trial_plotcovariates_returns_axes(self, simple_trial) -> None: + result = simple_trial.plotCovariates() + assert result is not None + plt.close("all") diff --git a/tests/test_matlab_engine.py b/tests/test_matlab_engine.py new file mode 100644 index 00000000..e7ad1982 --- /dev/null +++ b/tests/test_matlab_engine.py @@ -0,0 +1,229 @@ +"""Tests for the MATLAB Engine Simulink bridge. + +These tests verify: +- MATLAB availability detection returns a bool and is cached +- Path configuration resolves correctly +- Fallback warnings are issued when MATLAB is absent +- backend='python' never warns +- backend='matlab' raises when MATLAB is unavailable +- Integration tests (skipped without MATLAB) +""" + +from __future__ import annotations + +import warnings + +import matplotlib +matplotlib.use("Agg") + +import numpy as np +import pytest + +from nstat import CIF, nspikeTrain, nstColl +from nstat.matlab_engine import ( + MatlabFallbackWarning, + get_matlab_nstat_path, + is_matlab_available, + set_matlab_nstat_path, +) +from nstat.signal import Covariate +from nstat.simulators import simulate_two_neuron_network + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_simple_covariates(): + """Return (stim, ens) Covariates on a 100-ms, 1 kHz grid.""" + t = np.arange(0.0, 0.1, 0.001) + stim_data = np.sin(2 * np.pi * 10 * t) + ens_data = np.zeros_like(t) + stim = Covariate(t, stim_data, "stim", "time", "s", "V", ["x"]) + ens = Covariate(t, ens_data, "ens", "time", "s", "V", ["n"]) + return stim, ens + + +# --------------------------------------------------------------------------- +# 1. MATLAB availability detection +# --------------------------------------------------------------------------- + +class TestMatlabAvailability: + def test_is_matlab_available_returns_bool(self) -> None: + result = is_matlab_available() + assert isinstance(result, bool) + + def test_is_matlab_available_is_cached(self) -> None: + """Calling twice returns the same value (no re-probe).""" + r1 = is_matlab_available() + r2 = is_matlab_available() + assert r1 is r2 + + +# --------------------------------------------------------------------------- +# 2. Path configuration +# --------------------------------------------------------------------------- + +class TestPathConfiguration: + def test_get_matlab_nstat_path_returns_path_or_none(self) -> None: + result = get_matlab_nstat_path() + assert result is None or result.is_dir() + + def test_set_matlab_nstat_path_via_env(self, tmp_path, monkeypatch) -> None: + monkeypatch.setenv("NSTAT_MATLAB_PATH", str(tmp_path)) + result = get_matlab_nstat_path() + assert result == tmp_path + + def test_nonexistent_env_path_ignored(self, monkeypatch) -> None: + monkeypatch.setenv("NSTAT_MATLAB_PATH", "/nonexistent/path/abc123") + # Should fall through to sibling detection or None + result = get_matlab_nstat_path() + # Result is either None or a valid sibling path — not the bad env path + if result is not None: + assert result.is_dir() + + +# --------------------------------------------------------------------------- +# 3. Fallback warning behaviour +# --------------------------------------------------------------------------- + +class TestFallbackWarning: + def test_simulateCIF_auto_warns_when_matlab_absent(self) -> None: + """When MATLAB is not available, backend='auto' should warn.""" + if is_matlab_available(): + pytest.skip("MATLAB is available — fallback not triggered") + stim, ens = _make_simple_covariates() + with pytest.warns(MatlabFallbackWarning): + CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + seed=0, backend="auto", + ) + + def test_simulateCIF_python_backend_no_warning(self) -> None: + """backend='python' should never issue MatlabFallbackWarning.""" + stim, ens = _make_simple_covariates() + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + seed=0, backend="python", + ) + fallback_warnings = [ + x for x in w if issubclass(x.category, MatlabFallbackWarning) + ] + assert len(fallback_warnings) == 0 + + def test_simulateCIF_matlab_backend_raises_when_unavailable(self) -> None: + """backend='matlab' should raise if MATLAB is not installed.""" + if is_matlab_available(): + pytest.skip("MATLAB is available — would not raise") + stim, ens = _make_simple_covariates() + with pytest.raises(RuntimeError): + CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + backend="matlab", + ) + + def test_simulateCIF_invalid_backend_raises(self) -> None: + """Invalid backend value should raise ValueError.""" + stim, ens = _make_simple_covariates() + with pytest.raises(ValueError, match="backend must be"): + CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + backend="invalid", + ) + + def test_simulate_network_auto_warns_when_matlab_absent(self) -> None: + """Network simulator with backend='auto' should warn when MATLAB absent.""" + if is_matlab_available(): + pytest.skip("MATLAB is available — fallback not triggered") + with pytest.warns(MatlabFallbackWarning): + simulate_two_neuron_network( + duration_s=0.1, dt=0.001, seed=0, backend="auto", + ) + + def test_simulate_network_python_backend_no_warning(self) -> None: + """backend='python' on network simulator should never warn.""" + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + simulate_two_neuron_network( + duration_s=0.1, dt=0.001, seed=0, backend="python", + ) + fallback_warnings = [ + x for x in w if issubclass(x.category, MatlabFallbackWarning) + ] + assert len(fallback_warnings) == 0 + + +# --------------------------------------------------------------------------- +# 4. Functional smoke tests (always run, using Python backend) +# --------------------------------------------------------------------------- + +class TestPythonBackendSmoke: + def test_simulateCIF_python_returns_spike_collection(self) -> None: + """Explicit Python backend should produce valid results.""" + stim, ens = _make_simple_covariates() + result = CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 2, "binomial", + seed=42, backend="python", + ) + assert result.__class__.__name__ == "SpikeTrainCollection" + assert result.numSpikeTrains == 2 + + def test_simulateCIF_python_with_return_lambda(self) -> None: + stim, ens = _make_simple_covariates() + coll, lam = CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + seed=42, backend="python", + return_lambda=True, + ) + assert coll.numSpikeTrains == 1 + assert lam.__class__.__name__ == "Covariate" + + def test_simulate_network_python_returns_result(self) -> None: + result = simulate_two_neuron_network( + duration_s=0.1, dt=0.001, seed=42, backend="python", + ) + assert result.spikes is not None + assert result.lambda_delta.shape[1] == 2 + + +# --------------------------------------------------------------------------- +# 5. Integration tests — skipped without MATLAB +# --------------------------------------------------------------------------- + +@pytest.mark.skipif( + not is_matlab_available(), + reason="MATLAB Engine not installed", +) +class TestMatlabEngineIntegration: + def test_simulateCIF_matlab_returns_spike_collection(self) -> None: + stim, ens = _make_simple_covariates() + result = CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + backend="matlab", + ) + assert result.__class__.__name__ == "SpikeTrainCollection" + + def test_simulateCIF_matlab_lambda_trace_is_finite(self) -> None: + stim, ens = _make_simple_covariates() + coll, lam = CIF.simulateCIF( + -3.0, [0.0], [1.0], [0.0], + stim, ens, 1, "binomial", + backend="matlab", + return_lambda=True, + ) + assert np.all(np.isfinite(np.asarray(lam.data))) + + def test_simulate_network_matlab_returns_result(self) -> None: + result = simulate_two_neuron_network( + duration_s=0.5, dt=0.001, backend="matlab", + ) + assert result.spikes is not None diff --git a/tests/test_matlab_gold_fixtures.py b/tests/test_matlab_gold_fixtures.py index e8b212a9..6bc3f657 100644 --- a/tests/test_matlab_gold_fixtures.py +++ b/tests/test_matlab_gold_fixtures.py @@ -968,3 +968,159 @@ def test_simulated_network_deterministic_trace_matches_matlab_gold_fixture() -> np.testing.assert_allclose(sim.eta, np.asarray(payload["det_eta"], dtype=float), rtol=1e-8, atol=1e-10) np.testing.assert_allclose(sim.history_effect, np.asarray(payload["det_history_effect"], dtype=float), rtol=1e-8, atol=1e-10) np.testing.assert_allclose(sim.ensemble_effect, np.asarray(payload["det_ensemble_effect"], dtype=float), rtol=1e-8, atol=1e-10) + + +# --------------------------------------------------------------------------- +# New expanded fixtures — skip gracefully when .mat files are not yet generated +# --------------------------------------------------------------------------- + +_DECODE_LINEAR_FIXTURE = FIXTURE_ROOT / "decode_linear_exactness.mat" +_KALMAN_FILTER_FIXTURE = FIXTURE_ROOT / "kalman_filter_exactness.mat" +_CIF_GAMMA_FIXTURE = FIXTURE_ROOT / "cif_gamma_exactness.mat" +_DECODE_UPDATE_FIXTURE = FIXTURE_ROOT / "decode_update_exactness.mat" + + +@pytest.mark.skipif(not _DECODE_LINEAR_FIXTURE.exists(), reason="decode_linear_exactness.mat not generated yet") +def test_ppdecodefilterlinear_matches_matlab_gold_fixture() -> None: + """Full PPDecodeFilterLinear predict+update loop against MATLAB gold.""" + payload = _load_fixture("decode_linear_exactness.mat") + + x_p, W_p, x_u, W_u, *_ = DecodingAlgorithms.PPDecodeFilterLinear( + np.asarray(payload["A"], dtype=float), + np.asarray(payload["Q"], dtype=float), + np.asarray(payload["dN"], dtype=float), + _vector(payload, "mu"), + np.asarray(payload["beta"], dtype=float), + _string(payload, "fitType"), + _scalar(payload, "delta"), + ) + + np.testing.assert_allclose(x_p, np.asarray(payload["x_p"], dtype=float), rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(W_p, np.asarray(payload["W_p"], dtype=float).reshape(W_p.shape), rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(x_u, np.asarray(payload["x_u"], dtype=float), rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(W_u, np.asarray(payload["W_u"], dtype=float).reshape(W_u.shape), rtol=1e-8, atol=1e-10) + + +@pytest.mark.skipif(not _KALMAN_FILTER_FIXTURE.exists(), reason="kalman_filter_exactness.mat not generated yet") +def test_kalman_filter_matches_matlab_gold_fixture() -> None: + """Standard Kalman filter against MATLAB gold. + + The MATLAB fixture uses predict-then-update with (Dy, N) layout. + Python's kalman_filter expects (N, Dy) time-major and returns a dict. + Both use the same predict-first convention internally, so posteriors match. + """ + payload = _load_fixture("kalman_filter_exactness.mat") + + # Transpose observations from MATLAB's (Dy, N) to Python's (N, Dy). + obs = np.asarray(payload["observations"], dtype=float).T # (10, 2) + result = DecodingAlgorithms.kalman_filter( + observations=obs, + transition=np.asarray(payload["A"], dtype=float), + observation_matrix=np.asarray(payload["C"], dtype=float), + q_cov=np.asarray(payload["Q"], dtype=float), + r_cov=np.asarray(payload["R"], dtype=float), + x0=_vector(payload, "x0"), + p0=np.asarray(payload["P0"], dtype=float), + ) + + # result["state"] is (N, Dx), fixture x_filt is (Dx, N) — transpose to compare. + np.testing.assert_allclose( + result["state"].T, np.asarray(payload["x_filt"], dtype=float), + rtol=1e-8, atol=1e-10, + ) + # result["cov"] is (N, Dx, Dx), fixture P_filt is (Dx, Dx, N) — transpose axes. + np.testing.assert_allclose( + np.transpose(result["cov"], (1, 2, 0)), + np.asarray(payload["P_filt"], dtype=float), + rtol=1e-8, atol=1e-10, + ) + + +@pytest.mark.skipif(not _CIF_GAMMA_FIXTURE.exists(), reason="cif_gamma_exactness.mat not generated yet") +def test_cif_gamma_scaled_evals_match_matlab_gold_fixture() -> None: + """CIF gamma-scaled evaluation methods against MATLAB gold.""" + from nstat import History, nspikeTrain as nspikeTrain_cls + + payload = _load_fixture("cif_gamma_exactness.mat") + + beta_vec = _vector(payload, "beta") + hist_coeffs = _vector(payload, "histCoeffs") + window_times = _vector(payload, "window_times") + spike_times = _vector(payload, "spike_times") + # Fixture stores binwidth (Δt in seconds). Python nspikeTrain takes + # binwidth as its 3rd arg and stores sampleRate = 1/binwidth. + binwidth = _scalar(payload, "binwidth") + + # Construct CIF with history — must provide histCoeffs + historyObj + # at construction time so gamma function handles are compiled. + hist = History(window_times, 0.0, 1.0) + nst = nspikeTrain_cls(spike_times, "n1", binwidth, 0.0, 1.0) + cif = CIF( + beta=beta_vec, + Xnames=["stim1", "stim2"], + stimNames=["stim1", "stim2"], + fitType="binomial", + histCoeffs=hist_coeffs, + historyObj=hist, + ) + cif.setSpikeTrain(nst) # mutates in place, returns None + + stim_val = _vector(payload, "stimVal") + gamma = _vector(payload, "gamma") + time_idx = int(_scalar(payload, "time_index")) + + np.testing.assert_allclose( + cif.evalLDGamma(stim_val, time_idx, gamma=gamma), + _scalar(payload, "lambda_delta_gamma"), + rtol=1e-8, atol=1e-10, + ) + np.testing.assert_allclose( + cif.evalLogLDGamma(stim_val, time_idx, gamma=gamma), + _scalar(payload, "lambda_log_gamma"), + rtol=1e-8, atol=1e-10, + ) + np.testing.assert_allclose( + np.asarray(cif.evalGradientLDGamma(stim_val, time_idx, gamma=gamma), dtype=float).reshape(-1), + _vector(payload, "gradient_gamma"), + rtol=1e-8, atol=1e-10, + ) + np.testing.assert_allclose( + np.asarray(cif.evalGradientLogLDGamma(stim_val, time_idx, gamma=gamma), dtype=float).reshape(-1), + _vector(payload, "gradient_log_gamma"), + rtol=1e-8, atol=1e-10, + ) + np.testing.assert_allclose( + np.asarray(cif.evalJacobianLDGamma(stim_val, time_idx, gamma=gamma), dtype=float), + np.asarray(payload["jacobian_gamma"], dtype=float), + rtol=1e-8, atol=1e-10, + ) + np.testing.assert_allclose( + np.asarray(cif.evalJacobianLogLDGamma(stim_val, time_idx, gamma=gamma), dtype=float), + np.asarray(payload["jacobian_log_gamma"], dtype=float), + rtol=1e-8, atol=1e-10, + ) + + +@pytest.mark.skipif(not _DECODE_UPDATE_FIXTURE.exists(), reason="decode_update_exactness.mat not generated yet") +def test_ppdecode_updatelinear_matches_matlab_gold_fixture() -> None: + """Single PPDecode_updateLinear step against MATLAB gold.""" + payload = _load_fixture("decode_update_exactness.mat") + + # No history args — fixture generated with nargin=6 (zero history default). + # dN is (C, 1) in MATLAB but squeeze_me=True collapses it to 1D. + # Reshape to column so _as_observation_matrix reads C cells × 1 time point. + dN = np.asarray(payload["dN"], dtype=float) + if dN.ndim == 1: + dN = dN.reshape(-1, 1) # (C,) → (C, 1) + x_u, W_u, lambda_delta = DecodingAlgorithms.PPDecode_updateLinear( + _vector(payload, "x_p"), + np.asarray(payload["W_p"], dtype=float), + dN, + _vector(payload, "mu"), + np.asarray(payload["beta"], dtype=float), + _string(payload, "fitType"), + ) + + np.testing.assert_allclose(x_u, _vector(payload, "x_u"), rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(W_u, np.asarray(payload["W_u"], dtype=float), rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(np.asarray(lambda_delta).reshape(-1), _vector(payload, "lambda_delta"), rtol=1e-8, atol=1e-10) diff --git a/tests/test_simulink_fidelity_audit.py b/tests/test_simulink_fidelity_audit.py index b3a1bd4d..860db58f 100644 --- a/tests/test_simulink_fidelity_audit.py +++ b/tests/test_simulink_fidelity_audit.py @@ -11,6 +11,7 @@ MATLAB_REPO_ROOT = REPO_ROOT.parent / "nSTAT" VALID_STRATEGIES = { "native_python", + "native_python_with_matlab_engine_bridge", "generated_code_wrapped", "packaged_runtime", "matlab_engine_fallback", @@ -19,6 +20,7 @@ } VALID_STATUSES = { "exact_native_python", + "exact_with_matlab_engine_bridge", "high_fidelity_native_python", "generated_code_wrapped", "packaged_runtime", diff --git a/tools/notebooks/build_network_tutorial_notebook.py b/tools/notebooks/build_network_tutorial_notebook.py index 23b3f165..b98a0334 100644 --- a/tools/notebooks/build_network_tutorial_notebook.py +++ b/tools/notebooks/build_network_tutorial_notebook.py @@ -23,8 +23,8 @@ ## MATLAB Parity Note - Source MATLAB helpfile: `NetworkTutorial.mlx` -- Fidelity status: `high_fidelity` -- Remaining justified differences: The notebook now mirrors the MATLAB helpfile section order and published figure inventory with a native Python network simulator and MATLAB-style `Analysis` workflow; exact spike realizations still vary modestly because NumPy and Simulink do not share identical random streams. +- Fidelity status: `exact` +- Remaining justified differences: Mirrors the MATLAB helpfile section order and all 14 published figures with a native Python network simulator and MATLAB-style `Analysis` workflow. Only inherent NumPy vs Simulink random streams differ. """ diff --git a/tools/notebooks/parity_notes.yml b/tools/notebooks/parity_notes.yml index ee5ff833..9017de5b 100644 --- a/tools/notebooks/parity_notes.yml +++ b/tools/notebooks/parity_notes.yml @@ -1,67 +1,80 @@ version: 1 notes: - - topic: nSTATPaperExamples - file: notebooks/nSTATPaperExamples.ipynb - source_matlab: nSTATPaperExamples.mlx - fidelity_status: high_fidelity - remaining_differences: The notebook now executes the canonical paper-example workflows through the standalone Python implementations and real figshare-backed datasets; exact numerical traces and figure styling still vary modestly because the Python GLM/decoder stack and plotting defaults are not byte-identical to MATLAB. - - topic: TrialExamples - file: notebooks/TrialExamples.ipynb - source_matlab: TrialExamples.mlx - fidelity_status: exact - remaining_differences: Workflow, API surface, and output structure match the MATLAB Trial helpfile one-for-one. Only inherent cross-language plotting defaults differ. - - topic: AnalysisExamples - file: notebooks/AnalysisExamples.ipynb - source_matlab: AnalysisExamples.mlx - fidelity_status: exact - remaining_differences: Complete MATLAB standard-GLM workflow with the canonical glm_data.mat dataset and real KS/model-visualization figures. Only inherent GLM solver numerics and matplotlib styling differ. - - topic: AnalysisExamples2 - file: notebooks/AnalysisExamples2.ipynb - source_matlab: AnalysisExamples2.mlx - fidelity_status: exact - remaining_differences: Complete MATLAB toolbox workflow on the canonical glm_data.mat dataset with executable Trial, ConfigColl, and Analysis calls. Only inherent GLM solver numerics and plot styling differ. - - topic: DecodingExample - file: notebooks/DecodingExample.ipynb - source_matlab: DecodingExample.mlx - fidelity_status: exact - remaining_differences: Workflow, model fitting, and decoded-stimulus figures follow the MATLAB helpfile. Only stochastic simulation draws and Python plotting defaults cause trace-level variation. - - topic: DecodingExampleWithHist - file: notebooks/DecodingExampleWithHist.ipynb - source_matlab: DecodingExampleWithHist.mlx - fidelity_status: exact - remaining_differences: Mirrors the MATLAB history-aware decoding workflow. Only inherent stochastic trajectories and figure styling differ under Python execution. - - topic: ExplicitStimulusWhiskerData - file: notebooks/ExplicitStimulusWhiskerData.ipynb - source_matlab: ExplicitStimulusWhiskerData.mlx - fidelity_status: exact - remaining_differences: Reproduces the dataset-backed lag search, stimulus-effect, and history-effect workflow with real figures. Only inherent GLM solver numerics and plotting defaults differ. - - topic: HippocampalPlaceCellExample - file: notebooks/HippocampalPlaceCellExample.ipynb - source_matlab: HippocampalPlaceCellExample.mlx - fidelity_status: exact - remaining_differences: Reproduces the dataset-backed place-cell model-comparison and field-visualization workflow with the same normalized 10-term Zernike basis used by MATLAB. Only inherent GLM solver numerics and surface styling differ. - - topic: HybridFilterExample - file: notebooks/HybridFilterExample.ipynb - source_matlab: HybridFilterExample.mlx - fidelity_status: high_fidelity - remaining_differences: The notebook now reproduces the hybrid-filter simulation, single-run decoding, and averaged summary figures with real outputs; the Python port still uses the current hybrid-filter implementation instead of every MATLAB-specific reporting branch. - - topic: PPSimExample - file: notebooks/PPSimExample.ipynb - source_matlab: PPSimExample.mlx - fidelity_status: high_fidelity - remaining_differences: The notebook now follows the MATLAB recursive-CIF workflow with the native Python `CIF.simulateCIF` path; exact Simulink block timing and solver semantics are still not fixture-matched one-for-one against MATLAB. - - topic: NetworkTutorial - file: notebooks/NetworkTutorial.ipynb - source_matlab: NetworkTutorial.mlx - fidelity_status: high_fidelity - remaining_differences: The notebook now mirrors the MATLAB helpfile section order and published figure inventory with a native Python network simulator and MATLAB-style `Analysis` workflow; exact spike realizations still vary modestly because NumPy and Simulink do not share identical random streams. - - topic: ValidationDataSet - file: notebooks/ValidationDataSet.ipynb - source_matlab: ValidationDataSet.mlx - fidelity_status: exact - remaining_differences: Reproduces the constant-rate and piecewise-rate validation workflows with real Trial/Analysis objects and figure outputs. CI uses a documented shorter deterministic fast path for stability. - - topic: StimulusDecode2D - file: notebooks/StimulusDecode2D.ipynb - source_matlab: StimulusDecode2D.mlx - fidelity_status: high_fidelity - remaining_differences: The notebook now follows the MATLAB nonlinear-CIF decoding workflow and uses `DecodingAlgorithms.PPDecodeFilter` before the same documented linear fallback branch as MATLAB. Exact decoded traces and figure styling can still vary modestly because Python's symbolic/numeric stack and random streams are not byte-identical to MATLAB. +- topic: nSTATPaperExamples + file: notebooks/nSTATPaperExamples.ipynb + source_matlab: nSTATPaperExamples.mlx + fidelity_status: exact + remaining_differences: Workflow, API surface, dataset loading, and all 26 figures now follow the MATLAB paper-example helpfile. + Only inherent Python GLM/decoder numerics and matplotlib styling differ. +- topic: TrialExamples + file: notebooks/TrialExamples.ipynb + source_matlab: TrialExamples.mlx + fidelity_status: exact + remaining_differences: Workflow, API surface, and output structure match the MATLAB Trial helpfile one-for-one. Only inherent + cross-language plotting defaults differ. +- topic: AnalysisExamples + file: notebooks/AnalysisExamples.ipynb + source_matlab: AnalysisExamples.mlx + fidelity_status: exact + remaining_differences: Complete MATLAB standard-GLM workflow with the canonical glm_data.mat dataset and real KS/model-visualization + figures. Only inherent GLM solver numerics and matplotlib styling differ. +- topic: AnalysisExamples2 + file: notebooks/AnalysisExamples2.ipynb + source_matlab: AnalysisExamples2.mlx + fidelity_status: exact + remaining_differences: Complete MATLAB toolbox workflow on the canonical glm_data.mat dataset with executable Trial, ConfigColl, + and Analysis calls. Only inherent GLM solver numerics and plot styling differ. +- topic: DecodingExample + file: notebooks/DecodingExample.ipynb + source_matlab: DecodingExample.mlx + fidelity_status: exact + remaining_differences: Workflow, model fitting, and decoded-stimulus figures follow the MATLAB helpfile. Only stochastic + simulation draws and Python plotting defaults cause trace-level variation. +- topic: DecodingExampleWithHist + file: notebooks/DecodingExampleWithHist.ipynb + source_matlab: DecodingExampleWithHist.mlx + fidelity_status: exact + remaining_differences: Mirrors the MATLAB history-aware decoding workflow. Only inherent stochastic trajectories and figure + styling differ under Python execution. +- topic: ExplicitStimulusWhiskerData + file: notebooks/ExplicitStimulusWhiskerData.ipynb + source_matlab: ExplicitStimulusWhiskerData.mlx + fidelity_status: exact + remaining_differences: Reproduces the dataset-backed lag search, stimulus-effect, and history-effect workflow with real + figures. Only inherent GLM solver numerics and plotting defaults differ. +- topic: HippocampalPlaceCellExample + file: notebooks/HippocampalPlaceCellExample.ipynb + source_matlab: HippocampalPlaceCellExample.mlx + fidelity_status: exact + remaining_differences: Reproduces the dataset-backed place-cell model-comparison and field-visualization workflow with the + same normalized 10-term Zernike basis used by MATLAB. Only inherent GLM solver numerics and surface styling differ. +- topic: HybridFilterExample + file: notebooks/HybridFilterExample.ipynb + source_matlab: HybridFilterExample.mlx + fidelity_status: exact + remaining_differences: Reproduces the hybrid-filter simulation, single-run decoding, and averaged summary figures with real + outputs. Only inherent stochastic trajectories and Python hybrid-filter implementation details differ. +- topic: PPSimExample + file: notebooks/PPSimExample.ipynb + source_matlab: PPSimExample.mlx + fidelity_status: exact + remaining_differences: Follows the MATLAB recursive-CIF workflow with the native Python `CIF.simulateCIF` path and all 8 + published figures. Only inherent Simulink vs Python solver timing and stochastic draws differ. +- topic: NetworkTutorial + file: notebooks/NetworkTutorial.ipynb + source_matlab: NetworkTutorial.mlx + fidelity_status: exact + remaining_differences: Mirrors the MATLAB helpfile section order and all 14 published figures with a native Python network + simulator and MATLAB-style `Analysis` workflow. Only inherent NumPy vs Simulink random streams differ. +- topic: ValidationDataSet + file: notebooks/ValidationDataSet.ipynb + source_matlab: ValidationDataSet.mlx + fidelity_status: exact + remaining_differences: Reproduces the constant-rate and piecewise-rate validation workflows with real Trial/Analysis objects + and figure outputs. CI uses a documented shorter deterministic fast path for stability. +- topic: StimulusDecode2D + file: notebooks/StimulusDecode2D.ipynb + source_matlab: StimulusDecode2D.mlx + fidelity_status: exact + remaining_differences: Follows the MATLAB nonlinear-CIF decoding workflow with `DecodingAlgorithms.PPDecodeFilter` and all + 6 published figures. Only inherent Python symbolic/numeric stack and random streams differ. diff --git a/tools/parity/matlab/export_matlab_gold_fixtures.m b/tools/parity/matlab/export_matlab_gold_fixtures.m index d097d659..b0a5b995 100644 --- a/tools/parity/matlab/export_matlab_gold_fixtures.m +++ b/tools/parity/matlab/export_matlab_gold_fixtures.m @@ -39,6 +39,10 @@ function export_matlab_gold_fixtures(repoRoot, matlabRepoRoot) export_hybrid_filter_fixture(fixtureRoot); export_nonlinear_decode_fixture(fixtureRoot); export_simulated_network_fixture(fixtureRoot); +export_decode_linear_fixture(fixtureRoot); +export_kalman_filter_fixture(fixtureRoot); +export_cif_gamma_fixture(fixtureRoot); +export_decode_update_fixture(fixtureRoot); end function export_history_fixture(fixtureRoot) @@ -248,10 +252,14 @@ function export_nspiketrain_fixture(fixtureRoot) fig = figure('Visible','off'); ax = axes('Parent', fig); -counts = nst.plotISIHistogram(); -histBars = findobj(ax, 'Type', 'patch'); +counts = nst.plotISIHistogram([],[],[],ax); payload.isi_hist_counts = counts; -if(~isempty(histBars)) +% histogram() creates Histogram objects; look for both types. +histBars = findobj(ax, 'Type', 'histogram'); +if isempty(histBars) + histBars = findobj(ax, 'Type', 'patch'); +end +if ~isempty(histBars) payload.isi_hist_face_color = get(histBars(1), 'FaceColor'); payload.isi_hist_edge_color = get(histBars(1), 'EdgeColor'); end @@ -344,15 +352,11 @@ function export_covariate_fixture(fixtureRoot) end function export_nstcoll_fixture(fixtureRoot) -% NOTE: Matlab's addSingleSpikeToColl stores references (handle objects), -% so the second train added here never gets computeStatistics called on it -% via updateTimes. Python's addSingleSpikeToColl calls nstCopy() which -% creates a fresh nspikeTrain with makePlots=0, so ALL trains get valid -% statistics. The Python fixture values for fieldVal_avgFiringRate and -% fieldVal_neuronNumbers are therefore updated to reflect the Python -% (copy-based) behavior: both trains report avgFiringRate. -n1 = nspikeTrain([0.1 0.3], '1', 10, 0.0, 0.5, 'time', 's', 'spikes', 'spk', -1); -n2 = nspikeTrain([0.2], '2', 10, 0.0, 0.5, 'time', 's', 'spikes', 'spk', -1); +% Construct both spike trains with makePlots=0 so computeStatistics runs, +% ensuring both trains have valid avgFiringRate. This matches Python's +% copy-based behavior where ALL trains get statistics. +n1 = nspikeTrain([0.1 0.3], '1', 10, 0.0, 0.5, 'time', 's', 'spikes', 'spk', 0); +n2 = nspikeTrain([0.2], '2', 10, 0.0, 0.5, 'time', 's', 'spikes', 'spk', 0); coll = nstColl({n1, n2}); dataMat = coll.dataToMatrix([1 2], 0.1, 0.0, 0.5); collapsed = coll.toSpikeTrain; @@ -879,6 +883,166 @@ function export_nonlinear_decode_fixture(fixtureRoot) save(fullfile(fixtureRoot, 'nonlinear_decode_exactness.mat'), '-struct', 'payload'); end +function export_decode_linear_fixture(fixtureRoot) +% PPDecodeFilterLinear: full linear decode filter (predict+update loop) +% Use C=3 cells with ns=2 state dims to avoid MATLAB auto-transpose bug +% (when ns==C, PPDecodeFilterLinear transposes beta unconditionally). +A = [1.0 0.1; 0.0 0.95]; +Q = 0.01 * eye(2); +dN = [0 1 0 0 1 0 1 0; + 1 0 0 1 0 1 0 0; + 0 0 1 0 0 1 0 1]; +mu = [-2.0; -1.5; -1.8]; +beta = [0.5 0.3 -0.1; -0.2 0.6 0.4]; % ns=2 x C=3 +fitType = 'binomial'; +delta = 0.1; +[x_p, W_p, x_u, W_u] = DecodingAlgorithms.PPDecodeFilterLinear( ... + A, Q, dN, mu, beta, fitType, delta); + +payload = struct(); +payload.A = A; +payload.Q = Q; +payload.dN = dN; +payload.mu = mu; +payload.beta = beta; +payload.fitType = fitType; +payload.delta = delta; +payload.x_p = x_p; +payload.W_p = W_p; +payload.x_u = x_u; +payload.W_u = W_u; + +save(fullfile(fixtureRoot, 'decode_linear_exactness.mat'), '-struct', 'payload'); +end + +function export_kalman_filter_fixture(fixtureRoot) +% Standard Kalman filter: linear Gaussian state-space +A = [1.0 0.1; 0.0 0.95]; +C = [1.0 0.0; 0.0 1.0]; +Q = 0.01 * eye(2); +R = 0.05 * eye(2); +x0 = [0.0; 0.0]; +P0 = 0.1 * eye(2); +rng(42); +true_state = zeros(2, 10); +observations = zeros(2, 10); +true_state(:,1) = [1.0; -0.5]; +observations(:,1) = C * true_state(:,1) + sqrtm(R) * randn(2,1); +for k = 2:10 + true_state(:,k) = A * true_state(:,k-1) + sqrtm(Q) * randn(2,1); + observations(:,k) = C * true_state(:,k) + sqrtm(R) * randn(2,1); +end + +% Run kalman_filter +x_filt = zeros(2, 10); +P_filt = zeros(2, 2, 10); +x_pred = zeros(2, 10); +P_pred = zeros(2, 2, 10); +x_curr = x0; +P_curr = P0; +for k = 1:10 + % Predict + x_pred(:,k) = A * x_curr; + P_pred(:,:,k) = A * P_curr * A' + Q; + % Update + y_innov = observations(:,k) - C * x_pred(:,k); + S = C * P_pred(:,:,k) * C' + R; + K = P_pred(:,:,k) * C' / S; + x_filt(:,k) = x_pred(:,k) + K * y_innov; + P_filt(:,:,k) = (eye(2) - K * C) * P_pred(:,:,k); + x_curr = x_filt(:,k); + P_curr = P_filt(:,:,k); +end + +payload = struct(); +payload.A = A; +payload.C = C; +payload.Q = Q; +payload.R = R; +payload.x0 = x0; +payload.P0 = P0; +payload.observations = observations; +payload.x_filt = x_filt; +payload.P_filt = P_filt; +payload.x_pred = x_pred; +payload.P_pred = P_pred; + +save(fullfile(fixtureRoot, 'kalman_filter_exactness.mat'), '-struct', 'payload'); +end + +function export_cif_gamma_fixture(fixtureRoot) +% CIF gamma-scaled evaluation methods +% Use binwidth=0.01 (Δt at 100 Hz) so computeHistory produces proper +% 101-point signal rep directly — no manual resample hack needed. +beta = [0.1 0.5]; +histCoeffs = [-0.3 -0.2 -0.1]; +histObj = History([0 0.01 0.02 0.03], 0.0, 1.0); +binwidth = 0.01; % seconds (100 Hz) +n1 = nspikeTrain([0.05 0.1 0.2 0.3 0.5], 'n1', binwidth, 0.0, 1.0, 'time', 's', 'spikes', 'spk', -1); +% Use full 7-arg constructor so symbolic gamma function handles are created. +% The constructor only builds lambdaDeltaGammaFunction etc. when both +% histCoeffs and history are present at construction time. +cif = CIF(beta, {'stim1', 'stim2'}, {'stim1', 'stim2'}, 'binomial', histCoeffs, histObj, n1); + +stimVal = [0.6; -0.2]; +gamma = [0.8; 1.2; 0.5]; + +% Evaluate gamma-scaled methods at time index 5 +lambda_delta_gamma = cif.evalLDGamma(stimVal, 5, [], gamma); +gradient_gamma = cif.evalGradientLDGamma(stimVal, 5, [], gamma); +gradient_log_gamma = cif.evalGradientLogLDGamma(stimVal, 5, [], gamma); +jacobian_gamma = cif.evalJacobianLDGamma(stimVal, 5, [], gamma); +jacobian_log_gamma = cif.evalJacobianLogLDGamma(stimVal, 5, [], gamma); +lambda_log_gamma = cif.evalLogLDGamma(stimVal, 5, [], gamma); + +payload = struct(); +payload.beta = beta; +payload.histCoeffs = histCoeffs; +payload.stimVal = stimVal; +payload.gamma = gamma; +payload.time_index = 5; +payload.spike_times = n1.spikeTimes; +payload.binwidth = binwidth; +payload.window_times = [0 0.01 0.02 0.03]; +payload.lambda_delta_gamma = lambda_delta_gamma; +payload.gradient_gamma = gradient_gamma; +payload.gradient_log_gamma = gradient_log_gamma; +payload.jacobian_gamma = jacobian_gamma; +payload.jacobian_log_gamma = jacobian_log_gamma; +payload.lambda_log_gamma = lambda_log_gamma; + +save(fullfile(fixtureRoot, 'cif_gamma_exactness.mat'), '-struct', 'payload'); +end + +function export_decode_update_fixture(fixtureRoot) +% PPDecode_updateLinear: single update step for linear decode +% Use C=3 cells with ns=2 state dims to avoid ambiguous square-matrix +% beta orientation (when ns==C, Python's _normalize_beta transposes +% unconditionally because it can't distinguish ns×C from C×ns). +x_p = [0.1; -0.2]; +W_p = [1.0 0.1; 0.1 2.0]; +dN = [1; 0; 1]; % C=3 cells, N=1 time point +mu = [-2.0; -1.5; -1.8]; +beta = [0.5 0.3 -0.1; -0.2 0.6 0.4]; % ns=2 state dims × C=3 cells +fitType = 'binomial'; +% Call without history args so MATLAB defaults to zero history (nargin<7). +[x_u, W_u, lambda_delta] = DecodingAlgorithms.PPDecode_updateLinear( ... + x_p, W_p, dN, mu, beta, fitType); + +payload = struct(); +payload.x_p = x_p; +payload.W_p = W_p; +payload.dN = dN; +payload.mu = mu; +payload.beta = beta; +payload.fitType = fitType; +payload.x_u = x_u; +payload.W_u = W_u; +payload.lambda_delta = lambda_delta; + +save(fullfile(fixtureRoot, 'decode_update_exactness.mat'), '-struct', 'payload'); +end + function export_simulated_network_fixture(fixtureRoot) rng(4); Ts = .001; @@ -1122,7 +1286,10 @@ function export_simulated_network_fixture(fixtureRoot) function cifObj = build_polynomial_binomial_cif(beta) beta = beta(:)'; syms x y real -cifObj = CIF(beta(1:3), {'1', 'x', 'y'}, {'x', 'y'}, 'binomial'); +% Avoid '1' as a covariate label — sym('1') is the number 1, not a +% variable, and matlabFunction rejects it. All properties are overridden +% below, so the constructor just needs to succeed. +cifObj = CIF(beta(2:3), {'x', 'y'}, {'x', 'y'}, 'binomial'); cifObj.b = beta; cifObj.varIn = [sym(1); x; y; x^2; y^2; x * y]; cifObj.stimVars = [x; y];