diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 11a74efc..77ad3fae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,6 +26,82 @@ jobs: - name: Run unit tests run: pytest -q + paper-gallery-artifacts: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -e .[dev] + - name: Regenerate paper gallery artifacts + run: python tools/paper_examples/build_gallery.py + - name: Fail on paper gallery drift + run: | + git diff --exit-code -- \ + docs/paper_examples.md \ + docs/figures/manifest.json \ + docs/figures/example01/README.md \ + docs/figures/example02/README.md \ + docs/figures/example03/README.md \ + docs/figures/example04/README.md \ + docs/figures/example05/README.md + + parity-report-artifacts: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -e .[dev] + - name: Regenerate parity report + run: python tools/parity/build_report.py + - name: Fail on parity report drift + run: git diff --exit-code -- parity/report.md + + docs-build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install -e .[dev] + - name: Regenerate paper gallery artifacts + run: python tools/paper_examples/build_gallery.py + - name: Build docs + run: python -m sphinx -W -b html docs docs/_build/html + + installer-smoke: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install 
--upgrade pip + python -m pip install -e .[dev] + - name: Smoke test module installer without data download + run: python -m nstat.install --download-example-data never --no-rebuild-doc-search + - name: Smoke test compatibility no-op flag + run: python -m nstat.install --download-example-data never --no-rebuild-doc-search --clean-user-path-prefs + data-integrity: runs-on: ubuntu-latest diff --git a/README.md b/README.md index 41aec2a2..fce519e8 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ ## Installation ```bash -python -m pip install nstat +python -m pip install nstat-toolbox ``` From source: @@ -46,239 +46,62 @@ Run the setup helper: nstat-install ``` -Equivalent Python API: - -```python -from nstat.install import nstat_install - -report = nstat_install() -``` - -## Examples - -> These examples generate figures with `matplotlib` and save PNGs under `examples/readme_examples/images/`. -> The images below show the expected output. - -Examples below require `matplotlib`: +Module form: ```bash -python -m pip install matplotlib +python -m nstat.install --download-example-data never --no-rebuild-doc-search ``` -### Example 1 — Single sinusoid: signal + multitaper spectrum + spectrogram -Run: - -```bash -python examples/readme_examples/example1_multitaper_and_spectrogram.py -``` +Equivalent Python API: ```python -import matplotlib -matplotlib.use("Agg") - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -from scipy.signal import spectrogram - -from nstat.compat.matlab import SignalObj - -fs_hz = 1000.0 -dt = 1.0 / fs_hz -duration_s = 2.0 -f0_hz = 10.0 -time = np.arange(0.0, duration_s, dt, dtype=float) - -signal = np.sin(2.0 * np.pi * f0_hz * time) -sig_obj = SignalObj(time=time, data=signal, name="sine_signal", units="a.u.") -freq_hz, psd = sig_obj.MTMspectrum() -f_spec, t_spec, sxx = spectrogram(signal, fs=fs_hz, nperseg=256, noverlap=224, scaling="density", mode="psd") - -fig, axes = plt.subplots(3, 1, figsize=(7.5, 7.5)) 
-preview_mask = time <= 1.0 -axes[0].plot(time[preview_mask], signal[preview_mask], color="tab:blue", linewidth=1.4) -axes[0].set_title("Signal (10 Hz sinusoid)") -axes[0].set_xlabel("time (s)") -axes[0].set_ylabel("amplitude") -axes[1].plot(freq_hz, psd, color="tab:orange", linewidth=1.2) -axes[1].set_xlim(0.0, 100.0) -axes[1].set_title("Multi-taper spectrum") -axes[1].set_xlabel("frequency (Hz)") -axes[1].set_ylabel("PSD") -im = axes[2].pcolormesh(t_spec, f_spec, sxx, shading="auto", cmap="magma") -axes[2].set_ylim(0.0, 100.0) -axes[2].set_title("Spectrogram") -axes[2].set_xlabel("time (s)") -axes[2].set_ylabel("frequency (Hz)") -fig.colorbar(im, ax=axes[2], pad=0.01, label="PSD") -fig.tight_layout() - -out_dir = Path("examples/readme_examples/images") -out_dir.mkdir(parents=True, exist_ok=True) -fig.savefig(out_dir / "readme_example1_multitaper_and_spectrogram.png", dpi=180) -``` - -**Expected output** -![Multitaper and spectrogram](examples/readme_examples/images/readme_example1_multitaper_and_spectrogram.png) - -### Example 2 — Time-varying CIF over 10 seconds (single-frequency sinusoid) -Run: - -```bash -python examples/readme_examples/example2_simulate_cif_spiketrain_10s.py -``` +from nstat.install import nstat_install -```python -import matplotlib -matplotlib.use("Agg") - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np - -from nstat.compat.matlab import CIF, Covariate - -np.random.seed(0) -dt = 0.001 -duration_s = 10.0 -t = np.arange(0.0, duration_s + dt, dt, dtype=float) - -f_hz = 0.5 -baseline_hz = 15.0 -amp_hz = 10.0 -lam = np.clip(baseline_hz + amp_hz * np.sin(2.0 * np.pi * f_hz * t), 0.2, None) - -lambda_cov = Covariate(time=t, data=lam, name="Lambda", units="spikes/s", labels=["lambda"]) -spikes = CIF.simulateCIFByThinningFromLambda(lambda_cov, 1, dt) -spike_times = spikes.getNST(0).spike_times - -fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8.0, 4.8), sharex=True, gridspec_kw={"height_ratios": [2.0, 1.0]}) 
-ax1.plot(t, lam, color="tab:blue", linewidth=1.3) -ax1.set_ylabel("rate (spikes/s)") -ax1.set_title("Time-varying CIF over 10 s") -ax2.vlines(spike_times, 0.0, 1.0, color="black", linewidth=0.8) -ax2.set_ylim(0.0, 1.0) -ax2.set_yticks([]) -ax2.set_xlabel("time (s)") -ax2.set_title("Simulated spike train") -fig.tight_layout() - -out_dir = Path("examples/readme_examples/images") -out_dir.mkdir(parents=True, exist_ok=True) -fig.savefig(out_dir / "readme_example2_simulate_cif_spiketrain_10s.png", dpi=180) +report = nstat_install() ``` -**Expected output** -![CIF spike train simulation](examples/readme_examples/images/readme_example2_simulate_cif_spiketrain_10s.png) +`clean_user_path_prefs` is accepted for MATLAB-API compatibility, but it is a +Python no-op because import paths are managed by the environment rather than a +MATLAB-style saved user path. -### Example 3 — Spike train collection raster from Example 2 -Run: - -```bash -python examples/readme_examples/example3_nstcoll_raster_from_example2.py -``` - -```python -import matplotlib -matplotlib.use("Agg") - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np - -from nstat.compat.matlab import CIF, Covariate - -np.random.seed(0) -dt = 0.001 -duration_s = 10.0 -n_units = 20 -t = np.arange(0.0, duration_s + dt, dt, dtype=float) - -f_hz = 0.5 -baseline_hz = 15.0 -amp_hz = 10.0 -lam = np.clip(baseline_hz + amp_hz * np.sin(2.0 * np.pi * f_hz * t), 0.2, None) - -lambda_cov = Covariate(time=t, data=lam, name="Lambda", units="spikes/s", labels=["lambda"]) -coll = CIF.simulateCIFByThinningFromLambda(lambda_cov, n_units, dt) - -fig, ax = plt.subplots(figsize=(8.0, 4.8)) -plt.sca(ax) -coll.plot() -ax.set_xlabel("time (s)") -ax.set_ylabel("unit index") -ax.set_title("Spike-train collection raster (nstColl.plot)") -ax.set_ylim(0.5, n_units + 0.5) -fig.tight_layout() - -out_dir = Path("examples/readme_examples/images") -out_dir.mkdir(parents=True, exist_ok=True) -fig.savefig(out_dir / 
"readme_example3_nstcoll_raster.png", dpi=180) -``` +## Examples -**Expected output** -![Spike train raster](examples/readme_examples/images/readme_example3_nstcoll_raster.png) +### Paper Examples (Self-Contained) -### nSTATPaperExamples +Canonical source files: +- `examples/paper/*.py` +- `nstat/paper_examples_full.py` -Run: +Single command to regenerate the paper-example gallery metadata: ```bash -python examples/readme_examples/example4_nstatpaperexamples_overview.py +python tools/paper_examples/build_gallery.py ``` -```python -import matplotlib -matplotlib.use("Agg") +This writes `docs/paper_examples.md` and `docs/figures/manifest.json`, and +ensures canonical figure-gallery directories exist under +`docs/figures/example01/` through `docs/figures/example05/`. -from pathlib import Path +| Example | Thumbnail | What question it answers | Run command | Links | +|---|---|---|---|---| +| Example 01 | ![Example 01](docs/figures/example01/fig01_constant_mg_summary.png) | Does Mg2+ washout produce firing-rate dynamics beyond a constant Poisson baseline? | `python examples/paper/example01_mepsc_poisson.py` | [Script](examples/paper/example01_mepsc_poisson.py) · [Figures](docs/figures/example01/) | +| Example 02 | ![Example 02](docs/figures/example02/fig01_data_overview.png) | What stimulus lag and history order best explain whisker-evoked spike trains? | `python examples/paper/example02_whisker_stimulus_thalamus.py` | [Script](examples/paper/example02_whisker_stimulus_thalamus.py) · [Figures](docs/figures/example02/) | +| Example 03 | ![Example 03](docs/figures/example03/fig01_simulated_and_real_rasters.png) | How do PSTH and SSGLM differ in capturing trial learning dynamics? 
| `python examples/paper/example03_psth_and_ssglm.py` | [Script](examples/paper/example03_psth_and_ssglm.py) · [Figures](docs/figures/example03/) | +| Example 04 | ![Example 04](docs/figures/example04/fig01_example_cells_path_overlay.png) | How do Gaussian and Zernike basis models compare for place-field mapping? | `python examples/paper/example04_place_cells_continuous_stimulus.py` | [Script](examples/paper/example04_place_cells_continuous_stimulus.py) · [Figures](docs/figures/example04/) | +| Example 05 | ![Example 05](docs/figures/example05/fig01_univariate_setup.png) | How accurately can neural populations decode latent stimulus and reach state? | `python examples/paper/example05_decoding_ppaf_pphf.py` | [Script](examples/paper/example05_decoding_ppaf_pphf.py) · [Figures](docs/figures/example05/) | -from nstat.paper_examples import run_paper_examples +Expanded paper-example index and figure gallery: +- [docs/paper_examples.md](docs/paper_examples.md) -repo_root = Path(".").resolve() -results, payloads = run_paper_examples(repo_root, return_plot_data=True) -print(results["experiment2"]) -print(results["experiment3"]) -print(results["experiment4"]) -print(results["experiment5"]) -``` +### Supplementary Examples + +These smaller demos remain useful as quick install and plotting checks. -**Expected output** -![nSTATPaperExamples overview](examples/readme_examples/images/readme_example4_nstatpaperexamples_overview.png) - -Complete catalog of nSTATPaperExamples notebooks: - -- [AnalysisExamples](notebooks/AnalysisExamples.ipynb) — Notebook example for AnalysisExamples. -- [ConfigCollExamples](notebooks/ConfigCollExamples.ipynb) — Notebook example for ConfigCollExamples. -- [CovCollExamples](notebooks/CovCollExamples.ipynb) — Notebook example for CovCollExamples. -- [CovariateExamples](notebooks/CovariateExamples.ipynb) — Notebook example for CovariateExamples. -- [DecodingExample](notebooks/DecodingExample.ipynb) — Notebook example for DecodingExample. 
-- [DecodingExampleWithHist](notebooks/DecodingExampleWithHist.ipynb) — Notebook example for DecodingExampleWithHist. -- [EventsExamples](notebooks/EventsExamples.ipynb) — Notebook example for EventsExamples. -- [ExplicitStimulusWhiskerData](notebooks/ExplicitStimulusWhiskerData.ipynb) — Notebook example for ExplicitStimulusWhiskerData. -- [FitResSummaryExamples](notebooks/FitResSummaryExamples.ipynb) — Notebook example for FitResSummaryExamples. -- [FitResultExamples](notebooks/FitResultExamples.ipynb) — Notebook example for FitResultExamples. -- [HippocampalPlaceCellExample](notebooks/HippocampalPlaceCellExample.ipynb) — Notebook example for HippocampalPlaceCellExample. -- [HistoryExamples](notebooks/HistoryExamples.ipynb) — Notebook example for HistoryExamples. -- [NetworkTutorial](notebooks/NetworkTutorial.ipynb) — Notebook example for NetworkTutorial. -- [PPSimExample](notebooks/PPSimExample.ipynb) — Notebook example for PPSimExample. -- [PPThinning](notebooks/PPThinning.ipynb) — Notebook example for PPThinning. -- [PSTHEstimation](notebooks/PSTHEstimation.ipynb) — Notebook example for PSTHEstimation. -- [SignalObjExamples](notebooks/SignalObjExamples.ipynb) — Notebook example for SignalObjExamples. -- [StimulusDecode2D](notebooks/StimulusDecode2D.ipynb) — Notebook example for StimulusDecode2D. -- [TrialConfigExamples](notebooks/TrialConfigExamples.ipynb) — Notebook example for TrialConfigExamples. -- [TrialExamples](notebooks/TrialExamples.ipynb) — Notebook example for TrialExamples. -- [ValidationDataSet](notebooks/ValidationDataSet.ipynb) — Notebook example for ValidationDataSet. -- [mEPSCAnalysis](notebooks/mEPSCAnalysis.ipynb) — Notebook example for mEPSCAnalysis. -- [nSTATPaperExamples](notebooks/nSTATPaperExamples.ipynb) — Notebook example for nSTATPaperExamples. -- [nSpikeTrainExamples](notebooks/nSpikeTrainExamples.ipynb) — Notebook example for nSpikeTrainExamples. 
-- [nstCollExamples](notebooks/nstCollExamples.ipynb) — Notebook example for nstCollExamples. -- [AnalysisExamples2](notebooks/AnalysisExamples2.ipynb) — Notebook example for AnalysisExamples2. -- [FitResultReference](notebooks/FitResultReference.ipynb) — Notebook example for FitResultReference. -- [HybridFilterExample](notebooks/HybridFilterExample.ipynb) — Notebook example for HybridFilterExample. +| Example | Run command | Output | +|---|---|---| +| Multitaper spectrum + spectrogram | `python examples/readme_examples/example1_multitaper_and_spectrogram.py` | [PNG](examples/readme_examples/images/readme_example1_multitaper_and_spectrogram.png) | +| Simulated CIF spike train | `python examples/readme_examples/example2_simulate_cif_spiketrain_10s.py` | [PNG](examples/readme_examples/images/readme_example2_simulate_cif_spiketrain_10s.png) | +| Spike-train raster | `python examples/readme_examples/example3_nstcoll_raster_from_example2.py` | [PNG](examples/readme_examples/images/readme_example3_nstcoll_raster.png) | ## Documentation diff --git a/__init__.py b/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/conf.py b/docs/conf.py index ed1e91ea..8c198593 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,6 +1,6 @@ project = "nSTAT Python" author = "Cajigas Lab" release = "0.1.0" -extensions = [] +extensions = ["myst_parser"] exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] master_doc = "index" diff --git a/docs/data_installation.rst b/docs/data_installation.rst index 91d633c2..cb547796 100644 --- a/docs/data_installation.rst +++ b/docs/data_installation.rst @@ -12,6 +12,12 @@ Command line nstat-install --download-example-data always +Module invocation also works: + +.. 
code-block:: bash + + python -m nstat.install --download-example-data never --no-rebuild-doc-search + Python API ---------- @@ -22,9 +28,21 @@ Python API data_dir = ensure_example_data(download=True) print(data_dir) +MATLAB-compatible paper-data helper +----------------------------------- + +.. code-block:: python + + from nstat import getPaperDataDirs, get_paper_data_dirs + + data_dir, mepsc_dir, explicit_stimulus_dir, psth_dir, place_cell_data_dir = getPaperDataDirs() + dirs = get_paper_data_dirs() + assert dirs.data_dir == data_dir + Notes ----- - Example data is cached under ``data_cache/`` by default. - Set ``NSTAT_DATA_DIR`` to point at an existing dataset cache if needed. - The repository intentionally ignores ``data/`` so local example-data installs are not committed. +- ``clean_user_path_prefs`` is accepted only as a MATLAB-compatibility no-op. diff --git a/docs/figures/example01/README.md b/docs/figures/example01/README.md new file mode 100644 index 00000000..467fffca --- /dev/null +++ b/docs/figures/example01/README.md @@ -0,0 +1,17 @@ +# example01 + +Generated figure outputs for `example01_mepsc_poisson`. 
+ +## Figures + +### fig01_constant_mg_summary.png + +![fig01_constant_mg_summary](./fig01_constant_mg_summary.png) + +### fig02_washout_raster_overview.png + +![fig02_washout_raster_overview](./fig02_washout_raster_overview.png) + +### fig03_piecewise_baseline_comparison.png + +![fig03_piecewise_baseline_comparison](./fig03_piecewise_baseline_comparison.png) diff --git a/docs/figures/example01/fig01_constant_mg_summary.png b/docs/figures/example01/fig01_constant_mg_summary.png new file mode 100644 index 00000000..d5c239df Binary files /dev/null and b/docs/figures/example01/fig01_constant_mg_summary.png differ diff --git a/docs/figures/example01/fig02_washout_raster_overview.png b/docs/figures/example01/fig02_washout_raster_overview.png new file mode 100644 index 00000000..2aed7129 Binary files /dev/null and b/docs/figures/example01/fig02_washout_raster_overview.png differ diff --git a/docs/figures/example01/fig03_piecewise_baseline_comparison.png b/docs/figures/example01/fig03_piecewise_baseline_comparison.png new file mode 100644 index 00000000..1d8656a9 Binary files /dev/null and b/docs/figures/example01/fig03_piecewise_baseline_comparison.png differ diff --git a/docs/figures/example02/README.md b/docs/figures/example02/README.md new file mode 100644 index 00000000..59a575cb --- /dev/null +++ b/docs/figures/example02/README.md @@ -0,0 +1,13 @@ +# example02 + +Generated figure outputs for `example02_whisker_stimulus_thalamus`. 
+ +## Figures + +### fig01_data_overview.png + +![fig01_data_overview](./fig01_data_overview.png) + +### fig02_lag_and_model_comparison.png + +![fig02_lag_and_model_comparison](./fig02_lag_and_model_comparison.png) diff --git a/docs/figures/example02/fig01_data_overview.png b/docs/figures/example02/fig01_data_overview.png new file mode 100644 index 00000000..cc2d587d Binary files /dev/null and b/docs/figures/example02/fig01_data_overview.png differ diff --git a/docs/figures/example02/fig02_lag_and_model_comparison.png b/docs/figures/example02/fig02_lag_and_model_comparison.png new file mode 100644 index 00000000..5cfed19a Binary files /dev/null and b/docs/figures/example02/fig02_lag_and_model_comparison.png differ diff --git a/docs/figures/example03/README.md b/docs/figures/example03/README.md new file mode 100644 index 00000000..13f1dc41 --- /dev/null +++ b/docs/figures/example03/README.md @@ -0,0 +1,29 @@ +# example03 + +Generated figure outputs for `example03_psth_and_ssglm`. + +## Figures + +### fig01_simulated_and_real_rasters.png + +![fig01_simulated_and_real_rasters](./fig01_simulated_and_real_rasters.png) + +### fig02_psth_comparison.png + +![fig02_psth_comparison](./fig02_psth_comparison.png) + +### fig03_ssglm_simulation_summary.png + +![fig03_ssglm_simulation_summary](./fig03_ssglm_simulation_summary.png) + +### fig04_ssglm_fit_diagnostics.png + +![fig04_ssglm_fit_diagnostics](./fig04_ssglm_fit_diagnostics.png) + +### fig05_stimulus_effect_surfaces.png + +![fig05_stimulus_effect_surfaces](./fig05_stimulus_effect_surfaces.png) + +### fig06_learning_trial_comparison.png + +![fig06_learning_trial_comparison](./fig06_learning_trial_comparison.png) diff --git a/docs/figures/example03/fig01_simulated_and_real_rasters.png b/docs/figures/example03/fig01_simulated_and_real_rasters.png new file mode 100644 index 00000000..de01c385 Binary files /dev/null and b/docs/figures/example03/fig01_simulated_and_real_rasters.png differ diff --git 
a/docs/figures/example03/fig02_psth_comparison.png b/docs/figures/example03/fig02_psth_comparison.png new file mode 100644 index 00000000..c1ebfe65 Binary files /dev/null and b/docs/figures/example03/fig02_psth_comparison.png differ diff --git a/docs/figures/example03/fig03_ssglm_simulation_summary.png b/docs/figures/example03/fig03_ssglm_simulation_summary.png new file mode 100644 index 00000000..38dbf975 Binary files /dev/null and b/docs/figures/example03/fig03_ssglm_simulation_summary.png differ diff --git a/docs/figures/example03/fig04_ssglm_fit_diagnostics.png b/docs/figures/example03/fig04_ssglm_fit_diagnostics.png new file mode 100644 index 00000000..a4b82a48 Binary files /dev/null and b/docs/figures/example03/fig04_ssglm_fit_diagnostics.png differ diff --git a/docs/figures/example03/fig05_stimulus_effect_surfaces.png b/docs/figures/example03/fig05_stimulus_effect_surfaces.png new file mode 100644 index 00000000..dae2c7fd Binary files /dev/null and b/docs/figures/example03/fig05_stimulus_effect_surfaces.png differ diff --git a/docs/figures/example03/fig06_learning_trial_comparison.png b/docs/figures/example03/fig06_learning_trial_comparison.png new file mode 100644 index 00000000..e2d6c7aa Binary files /dev/null and b/docs/figures/example03/fig06_learning_trial_comparison.png differ diff --git a/docs/figures/example04/README.md b/docs/figures/example04/README.md new file mode 100644 index 00000000..57130f84 --- /dev/null +++ b/docs/figures/example04/README.md @@ -0,0 +1,33 @@ +# example04 + +Generated figure outputs for `example04_place_cells_continuous_stimulus`. 
+ +## Figures + +### fig01_example_cells_path_overlay.png + +![fig01_example_cells_path_overlay](./fig01_example_cells_path_overlay.png) + +### fig02_model_summary_statistics.png + +![fig02_model_summary_statistics](./fig02_model_summary_statistics.png) + +### fig03_gaussian_place_fields_animal1.png + +![fig03_gaussian_place_fields_animal1](./fig03_gaussian_place_fields_animal1.png) + +### fig04_zernike_place_fields_animal1.png + +![fig04_zernike_place_fields_animal1](./fig04_zernike_place_fields_animal1.png) + +### fig05_gaussian_place_fields_animal2.png + +![fig05_gaussian_place_fields_animal2](./fig05_gaussian_place_fields_animal2.png) + +### fig06_zernike_place_fields_animal2.png + +![fig06_zernike_place_fields_animal2](./fig06_zernike_place_fields_animal2.png) + +### fig07_example_cell_mesh_comparison.png + +![fig07_example_cell_mesh_comparison](./fig07_example_cell_mesh_comparison.png) diff --git a/docs/figures/example04/fig01_example_cells_path_overlay.png b/docs/figures/example04/fig01_example_cells_path_overlay.png new file mode 100644 index 00000000..657c9df5 Binary files /dev/null and b/docs/figures/example04/fig01_example_cells_path_overlay.png differ diff --git a/docs/figures/example04/fig02_model_summary_statistics.png b/docs/figures/example04/fig02_model_summary_statistics.png new file mode 100644 index 00000000..251a0ce4 Binary files /dev/null and b/docs/figures/example04/fig02_model_summary_statistics.png differ diff --git a/docs/figures/example04/fig03_gaussian_place_fields_animal1.png b/docs/figures/example04/fig03_gaussian_place_fields_animal1.png new file mode 100644 index 00000000..cf3d564e Binary files /dev/null and b/docs/figures/example04/fig03_gaussian_place_fields_animal1.png differ diff --git a/docs/figures/example04/fig04_zernike_place_fields_animal1.png b/docs/figures/example04/fig04_zernike_place_fields_animal1.png new file mode 100644 index 00000000..e328b556 Binary files /dev/null and 
b/docs/figures/example04/fig04_zernike_place_fields_animal1.png differ diff --git a/docs/figures/example04/fig05_gaussian_place_fields_animal2.png b/docs/figures/example04/fig05_gaussian_place_fields_animal2.png new file mode 100644 index 00000000..da7fbdcd Binary files /dev/null and b/docs/figures/example04/fig05_gaussian_place_fields_animal2.png differ diff --git a/docs/figures/example04/fig06_zernike_place_fields_animal2.png b/docs/figures/example04/fig06_zernike_place_fields_animal2.png new file mode 100644 index 00000000..abff34e5 Binary files /dev/null and b/docs/figures/example04/fig06_zernike_place_fields_animal2.png differ diff --git a/docs/figures/example04/fig07_example_cell_mesh_comparison.png b/docs/figures/example04/fig07_example_cell_mesh_comparison.png new file mode 100644 index 00000000..cc4fe152 Binary files /dev/null and b/docs/figures/example04/fig07_example_cell_mesh_comparison.png differ diff --git a/docs/figures/example05/README.md b/docs/figures/example05/README.md new file mode 100644 index 00000000..f7cf7041 --- /dev/null +++ b/docs/figures/example05/README.md @@ -0,0 +1,29 @@ +# example05 + +Generated figure outputs for `example05_decoding_ppaf_pphf`. 
+ +## Figures + +### fig01_univariate_setup.png + +![fig01_univariate_setup](./fig01_univariate_setup.png) + +### fig02_univariate_decoding.png + +![fig02_univariate_decoding](./fig02_univariate_decoding.png) + +### fig03_reach_and_population_setup.png + +![fig03_reach_and_population_setup](./fig03_reach_and_population_setup.png) + +### fig04_ppaf_goal_vs_free.png + +![fig04_ppaf_goal_vs_free](./fig04_ppaf_goal_vs_free.png) + +### fig05_hybrid_setup.png + +![fig05_hybrid_setup](./fig05_hybrid_setup.png) + +### fig06_hybrid_decoding_summary.png + +![fig06_hybrid_decoding_summary](./fig06_hybrid_decoding_summary.png) diff --git a/docs/figures/example05/fig01_univariate_setup.png b/docs/figures/example05/fig01_univariate_setup.png new file mode 100644 index 00000000..97886d62 Binary files /dev/null and b/docs/figures/example05/fig01_univariate_setup.png differ diff --git a/docs/figures/example05/fig02_univariate_decoding.png b/docs/figures/example05/fig02_univariate_decoding.png new file mode 100644 index 00000000..778bbc2d Binary files /dev/null and b/docs/figures/example05/fig02_univariate_decoding.png differ diff --git a/docs/figures/example05/fig03_reach_and_population_setup.png b/docs/figures/example05/fig03_reach_and_population_setup.png new file mode 100644 index 00000000..a5723160 Binary files /dev/null and b/docs/figures/example05/fig03_reach_and_population_setup.png differ diff --git a/docs/figures/example05/fig04_ppaf_goal_vs_free.png b/docs/figures/example05/fig04_ppaf_goal_vs_free.png new file mode 100644 index 00000000..b7ef94ba Binary files /dev/null and b/docs/figures/example05/fig04_ppaf_goal_vs_free.png differ diff --git a/docs/figures/example05/fig05_hybrid_setup.png b/docs/figures/example05/fig05_hybrid_setup.png new file mode 100644 index 00000000..30883b47 Binary files /dev/null and b/docs/figures/example05/fig05_hybrid_setup.png differ diff --git a/docs/figures/example05/fig06_hybrid_decoding_summary.png 
b/docs/figures/example05/fig06_hybrid_decoding_summary.png new file mode 100644 index 00000000..b2cea5f7 Binary files /dev/null and b/docs/figures/example05/fig06_hybrid_decoding_summary.png differ diff --git a/docs/figures/manifest.json b/docs/figures/manifest.json new file mode 100644 index 00000000..41e3a3c5 --- /dev/null +++ b/docs/figures/manifest.json @@ -0,0 +1,112 @@ +{ + "figure_root": "docs/figures", + "examples": [ + { + "example_id": "example01", + "title": "mEPSC Poisson Models Under Constant and Washout Magnesium", + "source_script": "examples/paper/example01_mepsc_poisson.py", + "matlab_source": "examples/paper/example01_mepsc_poisson.m", + "description": "Fits constant and piecewise Poisson GLM baselines to mEPSC spike trains.", + "question": "Does Mg2+ washout produce firing-rate dynamics beyond a constant Poisson baseline?", + "run_command": "python examples/paper/example01_mepsc_poisson.py", + "figure_dir": "docs/figures/example01", + "figure_files": [ + "docs/figures/example01/fig01_constant_mg_summary.png", + "docs/figures/example01/fig02_washout_raster_overview.png", + "docs/figures/example01/fig03_piecewise_baseline_comparison.png" + ], + "thumbnail_file": "docs/figures/example01/fig01_constant_mg_summary.png", + "sections": [ + "experiment1" + ] + }, + { + "example_id": "example02", + "title": "Whisker Stimulus GLM With Lag and History Selection", + "source_script": "examples/paper/example02_whisker_stimulus_thalamus.py", + "matlab_source": "examples/paper/example02_whisker_stimulus_thalamus.m", + "description": "Fits explicit-stimulus point-process GLMs and compares baseline, stimulus, and history models.", + "question": "What stimulus lag and history order best explain whisker-evoked spike trains?", + "run_command": "python examples/paper/example02_whisker_stimulus_thalamus.py", + "figure_dir": "docs/figures/example02", + "figure_files": [ + "docs/figures/example02/fig01_data_overview.png", + 
"docs/figures/example02/fig02_lag_and_model_comparison.png" + ], + "thumbnail_file": "docs/figures/example02/fig01_data_overview.png", + "sections": [ + "experiment2" + ] + }, + { + "example_id": "example03", + "title": "PSTH and SSGLM Dynamics Example", + "source_script": "examples/paper/example03_psth_and_ssglm.py", + "matlab_source": "examples/paper/example03_psth_and_ssglm.m", + "description": "Bundles simulated PSTH and SSGLM examples from the canonical paper workflow.", + "question": "How do PSTH and SSGLM differ in capturing trial learning dynamics?", + "run_command": "python examples/paper/example03_psth_and_ssglm.py", + "figure_dir": "docs/figures/example03", + "figure_files": [ + "docs/figures/example03/fig01_simulated_and_real_rasters.png", + "docs/figures/example03/fig02_psth_comparison.png", + "docs/figures/example03/fig03_ssglm_simulation_summary.png", + "docs/figures/example03/fig04_ssglm_fit_diagnostics.png", + "docs/figures/example03/fig05_stimulus_effect_surfaces.png", + "docs/figures/example03/fig06_learning_trial_comparison.png" + ], + "thumbnail_file": "docs/figures/example03/fig01_simulated_and_real_rasters.png", + "sections": [ + "experiment3", + "experiment3b" + ] + }, + { + "example_id": "example04", + "title": "Place-Cell Receptive Fields (Gaussian vs Zernike)", + "source_script": "examples/paper/example04_place_cells_continuous_stimulus.py", + "matlab_source": "examples/paper/example04_place_cells_continuous_stimulus.m", + "description": "Loads place-cell datasets and compares receptive-field model families.", + "question": "How do Gaussian and Zernike basis models compare for place-field mapping?", + "run_command": "python examples/paper/example04_place_cells_continuous_stimulus.py", + "figure_dir": "docs/figures/example04", + "figure_files": [ + "docs/figures/example04/fig01_example_cells_path_overlay.png", + "docs/figures/example04/fig02_model_summary_statistics.png", + "docs/figures/example04/fig03_gaussian_place_fields_animal1.png", 
+ "docs/figures/example04/fig04_zernike_place_fields_animal1.png", + "docs/figures/example04/fig05_gaussian_place_fields_animal2.png", + "docs/figures/example04/fig06_zernike_place_fields_animal2.png", + "docs/figures/example04/fig07_example_cell_mesh_comparison.png" + ], + "thumbnail_file": "docs/figures/example04/fig01_example_cells_path_overlay.png", + "sections": [ + "experiment4" + ] + }, + { + "example_id": "example05", + "title": "Stimulus Decoding With PPAF and PPHF", + "source_script": "examples/paper/example05_decoding_ppaf_pphf.py", + "matlab_source": "examples/paper/example05_decoding_ppaf_pphf.m", + "description": "Bundles univariate, reaching, and hybrid decoding examples from the paper workflow.", + "question": "How accurately can neural populations decode latent stimulus and reach state?", + "run_command": "python examples/paper/example05_decoding_ppaf_pphf.py", + "figure_dir": "docs/figures/example05", + "figure_files": [ + "docs/figures/example05/fig01_univariate_setup.png", + "docs/figures/example05/fig02_univariate_decoding.png", + "docs/figures/example05/fig03_reach_and_population_setup.png", + "docs/figures/example05/fig04_ppaf_goal_vs_free.png", + "docs/figures/example05/fig05_hybrid_setup.png", + "docs/figures/example05/fig06_hybrid_decoding_summary.png" + ], + "thumbnail_file": "docs/figures/example05/fig01_univariate_setup.png", + "sections": [ + "experiment5", + "experiment5b", + "experiment6" + ] + } + ] +} \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index b80ec5dd..e22732c0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,3 +8,4 @@ Minimal documentation for the standalone Python nSTAT core package. 
api data_installation + paper_examples diff --git a/docs/paper_examples.md b/docs/paper_examples.md new file mode 100644 index 00000000..ddb99a36 --- /dev/null +++ b/docs/paper_examples.md @@ -0,0 +1,114 @@ +# nSTAT Python Paper Examples + +This page mirrors the MATLAB paper-example index for the standalone Python port. + +Canonical source files: +- `examples/paper/*.py` +- `nstat/paper_examples_full.py` + +## Run Everything + +```bash +python tools/paper_examples/build_gallery.py +``` + +Outputs: +- Figure metadata: `docs/figures/manifest.json` +- Gallery page: `docs/paper_examples.md` +- Figures: `docs/figures/example01/` ... `docs/figures/example05/` + +## Example Index + +| ID | Thumbnail | Standalone source | Question | Run command | Figure gallery | +|---|---|---|---|---|---| +| `example01` | ![Example 01](figures/example01/fig01_constant_mg_summary.png) | [example01_mepsc_poisson.py](../examples/paper/example01_mepsc_poisson.py) | Does Mg2+ washout produce firing-rate dynamics beyond a constant Poisson baseline? | `python examples/paper/example01_mepsc_poisson.py` | [gallery page](./figures/example01/README.md) | +| `example02` | ![Example 02](figures/example02/fig01_data_overview.png) | [example02_whisker_stimulus_thalamus.py](../examples/paper/example02_whisker_stimulus_thalamus.py) | What stimulus lag and history order best explain whisker-evoked spike trains? | `python examples/paper/example02_whisker_stimulus_thalamus.py` | [gallery page](./figures/example02/README.md) | +| `example03` | ![Example 03](figures/example03/fig01_simulated_and_real_rasters.png) | [example03_psth_and_ssglm.py](../examples/paper/example03_psth_and_ssglm.py) | How do PSTH and SSGLM differ in capturing trial learning dynamics? 
| `python examples/paper/example03_psth_and_ssglm.py` | [gallery page](./figures/example03/README.md) | +| `example04` | ![Example 04](figures/example04/fig01_example_cells_path_overlay.png) | [example04_place_cells_continuous_stimulus.py](../examples/paper/example04_place_cells_continuous_stimulus.py) | How do Gaussian and Zernike basis models compare for place-field mapping? | `python examples/paper/example04_place_cells_continuous_stimulus.py` | [gallery page](./figures/example04/README.md) | +| `example05` | ![Example 05](figures/example05/fig01_univariate_setup.png) | [example05_decoding_ppaf_pphf.py](../examples/paper/example05_decoding_ppaf_pphf.py) | How accurately can neural populations decode latent stimulus and reach state? | `python examples/paper/example05_decoding_ppaf_pphf.py` | [gallery page](./figures/example05/README.md) | + +```{toctree} +:hidden: + +figures/example01/README +figures/example02/README +figures/example03/README +figures/example04/README +figures/example05/README +``` + +## Gallery + +### Example 01: mEPSC Poisson Models Under Constant and Washout Magnesium + +Question: Does Mg2+ washout produce firing-rate dynamics beyond a constant Poisson baseline? + +Run command: `python examples/paper/example01_mepsc_poisson.py` + +![Example 01](figures/example01/fig01_constant_mg_summary.png) + +Expected figure files: +- `docs/figures/example01/fig01_constant_mg_summary.png` +- `docs/figures/example01/fig02_washout_raster_overview.png` +- `docs/figures/example01/fig03_piecewise_baseline_comparison.png` + +### Example 02: Whisker Stimulus GLM With Lag and History Selection + +Question: What stimulus lag and history order best explain whisker-evoked spike trains? 
+ +Run command: `python examples/paper/example02_whisker_stimulus_thalamus.py` + +![Example 02](figures/example02/fig01_data_overview.png) + +Expected figure files: +- `docs/figures/example02/fig01_data_overview.png` +- `docs/figures/example02/fig02_lag_and_model_comparison.png` + +### Example 03: PSTH and SSGLM Dynamics Example + +Question: How do PSTH and SSGLM differ in capturing trial learning dynamics? + +Run command: `python examples/paper/example03_psth_and_ssglm.py` + +![Example 03](figures/example03/fig01_simulated_and_real_rasters.png) + +Expected figure files: +- `docs/figures/example03/fig01_simulated_and_real_rasters.png` +- `docs/figures/example03/fig02_psth_comparison.png` +- `docs/figures/example03/fig03_ssglm_simulation_summary.png` +- `docs/figures/example03/fig04_ssglm_fit_diagnostics.png` +- `docs/figures/example03/fig05_stimulus_effect_surfaces.png` +- `docs/figures/example03/fig06_learning_trial_comparison.png` + +### Example 04: Place-Cell Receptive Fields (Gaussian vs Zernike) + +Question: How do Gaussian and Zernike basis models compare for place-field mapping? + +Run command: `python examples/paper/example04_place_cells_continuous_stimulus.py` + +![Example 04](figures/example04/fig01_example_cells_path_overlay.png) + +Expected figure files: +- `docs/figures/example04/fig01_example_cells_path_overlay.png` +- `docs/figures/example04/fig02_model_summary_statistics.png` +- `docs/figures/example04/fig03_gaussian_place_fields_animal1.png` +- `docs/figures/example04/fig04_zernike_place_fields_animal1.png` +- `docs/figures/example04/fig05_gaussian_place_fields_animal2.png` +- `docs/figures/example04/fig06_zernike_place_fields_animal2.png` +- `docs/figures/example04/fig07_example_cell_mesh_comparison.png` + +### Example 05: Stimulus Decoding With PPAF and PPHF + +Question: How accurately can neural populations decode latent stimulus and reach state? 
+ +Run command: `python examples/paper/example05_decoding_ppaf_pphf.py` + +![Example 05](figures/example05/fig01_univariate_setup.png) + +Expected figure files: +- `docs/figures/example05/fig01_univariate_setup.png` +- `docs/figures/example05/fig02_univariate_decoding.png` +- `docs/figures/example05/fig03_reach_and_population_setup.png` +- `docs/figures/example05/fig04_ppaf_goal_vs_free.png` +- `docs/figures/example05/fig05_hybrid_setup.png` +- `docs/figures/example05/fig06_hybrid_decoding_summary.png` diff --git a/examples/nSTATPaperExamples/manifest.yml b/examples/nSTATPaperExamples/manifest.yml index bbf03949..3c9e8e09 100644 --- a/examples/nSTATPaperExamples/manifest.yml +++ b/examples/nSTATPaperExamples/manifest.yml @@ -78,6 +78,9 @@ examples: - name: AnalysisExamples2 relative_path: notebooks/AnalysisExamples2.ipynb description: Notebook example for AnalysisExamples2. +- name: ConfidenceIntervalOverview + relative_path: notebooks/ConfidenceIntervalOverview.ipynb + description: Notebook example for ConfidenceIntervalOverview. - name: FitResultReference relative_path: notebooks/FitResultReference.ipynb description: Notebook example for FitResultReference. 
diff --git a/examples/paper/example01_mepsc_poisson.py b/examples/paper/example01_mepsc_poisson.py new file mode 100644 index 00000000..4ba1e9f1 --- /dev/null +++ b/examples/paper/example01_mepsc_poisson.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_example_catalog import main_for + + +if __name__ == "__main__": + raise SystemExit(main_for("example01")) diff --git a/examples/paper/example02_whisker_stimulus_thalamus.py b/examples/paper/example02_whisker_stimulus_thalamus.py new file mode 100644 index 00000000..74951c97 --- /dev/null +++ b/examples/paper/example02_whisker_stimulus_thalamus.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_example_catalog import main_for + + +if __name__ == "__main__": + raise SystemExit(main_for("example02")) diff --git a/examples/paper/example03_psth_and_ssglm.py b/examples/paper/example03_psth_and_ssglm.py new file mode 100644 index 00000000..0264dc21 --- /dev/null +++ b/examples/paper/example03_psth_and_ssglm.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_example_catalog import main_for + + +if __name__ == "__main__": + raise SystemExit(main_for("example03")) diff --git a/examples/paper/example04_place_cells_continuous_stimulus.py b/examples/paper/example04_place_cells_continuous_stimulus.py new file mode 100644 index 00000000..22f03770 --- /dev/null +++ 
b/examples/paper/example04_place_cells_continuous_stimulus.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_example_catalog import main_for + + +if __name__ == "__main__": + raise SystemExit(main_for("example04")) diff --git a/examples/paper/example05_decoding_ppaf_pphf.py b/examples/paper/example05_decoding_ppaf_pphf.py new file mode 100644 index 00000000..58cd9113 --- /dev/null +++ b/examples/paper/example05_decoding_ppaf_pphf.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_example_catalog import main_for + + +if __name__ == "__main__": + raise SystemExit(main_for("example05")) diff --git a/examples/paper/manifest.yml b/examples/paper/manifest.yml new file mode 100644 index 00000000..0aebfb56 --- /dev/null +++ b/examples/paper/manifest.yml @@ -0,0 +1,79 @@ +version: 1 +examples: + - example_id: example01 + name: example01_mepsc_poisson + script: examples/paper/example01_mepsc_poisson.py + matlab_source: examples/paper/example01_mepsc_poisson.m + title: mEPSC Poisson Models Under Constant and Washout Magnesium + question: Does Mg2+ washout produce firing-rate dynamics beyond a constant Poisson baseline? + description: Fits constant and piecewise Poisson GLM baselines to mEPSC spike trains. 
+ sections: + - experiment1 + figure_files: + - docs/figures/example01/fig01_constant_mg_summary.png + - docs/figures/example01/fig02_washout_raster_overview.png + - docs/figures/example01/fig03_piecewise_baseline_comparison.png + - example_id: example02 + name: example02_whisker_stimulus_thalamus + script: examples/paper/example02_whisker_stimulus_thalamus.py + matlab_source: examples/paper/example02_whisker_stimulus_thalamus.m + title: Whisker Stimulus GLM With Lag and History Selection + question: What stimulus lag and history order best explain whisker-evoked spike trains? + description: Fits explicit-stimulus point-process GLMs and compares baseline, stimulus, and history models. + sections: + - experiment2 + figure_files: + - docs/figures/example02/fig01_data_overview.png + - docs/figures/example02/fig02_lag_and_model_comparison.png + - example_id: example03 + name: example03_psth_and_ssglm + script: examples/paper/example03_psth_and_ssglm.py + matlab_source: examples/paper/example03_psth_and_ssglm.m + title: PSTH and SSGLM Dynamics Example + question: How do PSTH and SSGLM differ in capturing trial learning dynamics? + description: Bundles simulated PSTH and SSGLM examples from the canonical paper workflow. 
+ sections: + - experiment3 + - experiment3b + figure_files: + - docs/figures/example03/fig01_simulated_and_real_rasters.png + - docs/figures/example03/fig02_psth_comparison.png + - docs/figures/example03/fig03_ssglm_simulation_summary.png + - docs/figures/example03/fig04_ssglm_fit_diagnostics.png + - docs/figures/example03/fig05_stimulus_effect_surfaces.png + - docs/figures/example03/fig06_learning_trial_comparison.png + - example_id: example04 + name: example04_place_cells_continuous_stimulus + script: examples/paper/example04_place_cells_continuous_stimulus.py + matlab_source: examples/paper/example04_place_cells_continuous_stimulus.m + title: Place-Cell Receptive Fields (Gaussian vs Zernike) + question: How do Gaussian and Zernike basis models compare for place-field mapping? + description: Loads place-cell datasets and compares receptive-field model families. + sections: + - experiment4 + figure_files: + - docs/figures/example04/fig01_example_cells_path_overlay.png + - docs/figures/example04/fig02_model_summary_statistics.png + - docs/figures/example04/fig03_gaussian_place_fields_animal1.png + - docs/figures/example04/fig04_zernike_place_fields_animal1.png + - docs/figures/example04/fig05_gaussian_place_fields_animal2.png + - docs/figures/example04/fig06_zernike_place_fields_animal2.png + - docs/figures/example04/fig07_example_cell_mesh_comparison.png + - example_id: example05 + name: example05_decoding_ppaf_pphf + script: examples/paper/example05_decoding_ppaf_pphf.py + matlab_source: examples/paper/example05_decoding_ppaf_pphf.m + title: Stimulus Decoding With PPAF and PPHF + question: How accurately can neural populations decode latent stimulus and reach state? + description: Bundles univariate, reaching, and hybrid decoding examples from the paper workflow. 
+ sections: + - experiment5 + - experiment5b + - experiment6 + figure_files: + - docs/figures/example05/fig01_univariate_setup.png + - docs/figures/example05/fig02_univariate_decoding.png + - docs/figures/example05/fig03_reach_and_population_setup.png + - docs/figures/example05/fig04_ppaf_goal_vs_free.png + - docs/figures/example05/fig05_hybrid_setup.png + - docs/figures/example05/fig06_hybrid_decoding_summary.png diff --git a/notebooks/ConfidenceIntervalOverview.ipynb b/notebooks/ConfidenceIntervalOverview.ipynb new file mode 100644 index 00000000..bad3632b --- /dev/null +++ b/notebooks/ConfidenceIntervalOverview.ipynb @@ -0,0 +1,121 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ec813f90", + "metadata": {}, + "source": [ + "# ConfidenceIntervalOverview\n", + "\n", + "This notebook is the Python-side overview for the `ConfidenceInterval` class. It mirrors the MATLAB help page by showing how lower and upper bounds are stored over time and how the object fits into the broader signal workflow." + ] + }, + { + "cell_type": "markdown", + "id": "8170a57e", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "The canonical Python implementation lives in `nstat.confidence_interval`. A MATLAB-style compatibility adapter remains available as `nstat.ConfidenceInterval` when older examples need it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e50c571b", + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "import numpy as np\n", + "\n", + "from nstat.confidence_interval import ConfidenceInterval\n", + "\n", + "time = np.linspace(0.0, 1.0, 6)\n", + "mean = np.sin(2.0 * np.pi * time)\n", + "spread = np.full_like(mean, 0.15)\n", + "bounds = np.column_stack([mean - spread, mean + spread])\n", + "\n", + "ci = ConfidenceInterval(time, bounds, color=\"tab:blue\")\n", + "ci" + ] + }, + { + "cell_type": "markdown", + "id": "210494fc", + "metadata": {}, + "source": [ + "## Inspect the interval\n", + "\n", + "A `ConfidenceInterval` stores the time base plus a two-column bounds array. The `lower` and `upper` properties expose each side directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "badf84eb", + "metadata": {}, + "outputs": [], + "source": [ + "{\n", + " \"time\": ci.time.tolist(),\n", + " \"lower\": np.round(ci.lower, 3).tolist(),\n", + " \"upper\": np.round(ci.upper, 3).tolist(),\n", + " \"color\": ci.color,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "91b1e03d", + "metadata": {}, + "source": [ + "## Common validation rule\n", + "\n", + "The bounds array must have shape `(n, 2)` and the same number of rows as the time vector. The constructor rejects malformed input immediately." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "270611b9", + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " ConfidenceInterval(time, np.ones((len(time), 3)))\n", + "except ValueError as exc:\n", + " print(type(exc).__name__)\n", + " print(exc)" + ] + }, + { + "cell_type": "markdown", + "id": "b6e5190d", + "metadata": {}, + "source": [ + "## Related APIs\n", + "\n", + "- `nstat.SignalObj` and `nstat.signal.Signal` for general time-series containers\n", + "- `nstat.Covariate` for named covariate signals\n", + "- `nstat.getPaperDataDirs` / `nstat.get_paper_data_dirs` for canonical example-data discovery" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.12" + }, + "nstat": {} + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/nstat/__init__.py b/nstat/__init__.py index 796833b5..d96fc1dd 100644 --- a/nstat/__init__.py +++ b/nstat/__init__.py @@ -4,14 +4,15 @@ from .SignalObj import SignalObj from .analysis import Analysis, psth from .cif import CIF, CIFModel +from .data_manager import getPaperDataDirs, get_paper_data_dirs from .datasets import get_dataset_path, list_datasets, verify_checksums from .decoding import DecoderSuite from .decoding_algorithms import DecodingAlgorithms from .errors import DataNotFoundError, ParityValidationError, UnsupportedWorkflowError +from .events import Events from .fit import FitResSummary, FitResult, FitSummary from .glm import PoissonGLMResult, fit_poisson_glm from .history import History, HistoryBasis -from .install import nstat_install from .paper_examples_full import run_full_paper_examples from .signal import Covariate, Signal from .simulation import simulate_poisson_from_rate @@ -26,6 +27,18 @@ from .nspikeTrain import nspikeTrain from .nstColl import nstColl + +def __getattr__(name: str): + if name == "nstat_install": + from .install import 
nstat_install as _nstat_install + + return _nstat_install + if name == "nSTAT_Install": + from .nstat_install import nSTAT_Install as _nSTAT_Install + + return _nSTAT_Install + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + __all__ = [ "Analysis", "CIF", @@ -39,6 +52,7 @@ "DataNotFoundError", "DecoderSuite", "DecodingAlgorithms", + "Events", "FitResSummary", "FitResult", "FitSummary", @@ -56,8 +70,11 @@ "TrialConfig", "UnsupportedWorkflowError", "fit_poisson_glm", + "getPaperDataDirs", + "get_paper_data_dirs", "get_dataset_path", "list_datasets", + "nSTAT_Install", "nstat_install", "psth", "run_full_paper_examples", diff --git a/nstat/compat/matlab/__init__.py b/nstat/compat/matlab/__init__.py index d2cf90a5..5395fde7 100644 --- a/nstat/compat/matlab/__init__.py +++ b/nstat/compat/matlab/__init__.py @@ -2,15 +2,8 @@ from __future__ import annotations -from ...ConfigColl import ConfigColl -from ...Covariate import Covariate -from ...FitResSummary import FitResSummary -from ...FitResult import FitResult -from ...SignalObj import SignalObj -from ...TrialConfig import TrialConfig +from ... 
import ConfigColl, Covariate, FitResSummary, FitResult, SignalObj, TrialConfig, nspikeTrain, nstColl from ...cif import CIF -from ...nspikeTrain import nspikeTrain -from ...nstColl import nstColl __all__ = [ "CIF", @@ -23,4 +16,3 @@ "nspikeTrain", "nstColl", ] - diff --git a/nstat/data_manager.py b/nstat/data_manager.py index 80df75c8..ffbf0db4 100644 --- a/nstat/data_manager.py +++ b/nstat/data_manager.py @@ -45,6 +45,17 @@ def is_installed(self) -> bool: return all(path.exists() for path in self.required_files) +@dataclass(frozen=True) +class PaperDataDirs: + """Resolved dataset directories used by the canonical paper examples.""" + + data_dir: Path + mepsc_dir: Path + explicit_stimulus_dir: Path + psth_dir: Path + place_cell_data_dir: Path + + def _repo_root() -> Path: return Path(__file__).resolve().parents[1] @@ -219,6 +230,35 @@ def data_is_present(data_dir: Path) -> bool: return get_example_data_info(data_dir).is_installed +def get_paper_data_dirs(*, download: bool = True) -> PaperDataDirs: + """Return the canonical paper-example data directories. + + This is the Python-native equivalent of MATLAB ``getPaperDataDirs``. 
+ """ + + data_dir = ensure_example_data(download=download) + return PaperDataDirs( + data_dir=data_dir, + mepsc_dir=data_dir / "mEPSCs", + explicit_stimulus_dir=data_dir / "Explicit Stimulus", + psth_dir=data_dir / "PSTH", + place_cell_data_dir=data_dir / "Place Cells", + ) + + +def getPaperDataDirs(*, download: bool = True) -> tuple[Path, Path, Path, Path, Path]: + """MATLAB-style tuple-returning alias for :func:`get_paper_data_dirs`.""" + + dirs = get_paper_data_dirs(download=download) + return ( + dirs.data_dir, + dirs.mepsc_dir, + dirs.explicit_stimulus_dir, + dirs.psth_dir, + dirs.place_cell_data_dir, + ) + + def ensure_example_data(download: bool = True) -> Path: """Ensure the canonical example data exists locally and return its path.""" diff --git a/nstat/install.py b/nstat/install.py index 191995ec..d9c124db 100644 --- a/nstat/install.py +++ b/nstat/install.py @@ -3,7 +3,9 @@ from __future__ import annotations import argparse +import importlib.util import json +import subprocess import sys from pathlib import Path from typing import Any @@ -42,6 +44,47 @@ def _should_prompt_for_example_data(info: dict[str, Any]) -> bool: return answer.strip().lower() in {"y", "yes"} +def _rebuild_doc_search(repo_root: Path) -> dict[str, Any]: + docs_dir = repo_root / "docs" + output_dir = docs_dir / "_build" / "html" + search_index = output_dir / "searchindex.js" + report: dict[str, Any] = { + "requested": True, + "source_dir": str(docs_dir), + "output_dir": str(output_dir), + "search_index": str(search_index), + } + + if not (docs_dir / "conf.py").exists(): + report["status"] = "skipped" + report["reason"] = "Docs configuration was not found." + return report + + if importlib.util.find_spec("sphinx") is None: + report["status"] = "skipped" + report["reason"] = "Sphinx is not installed; install nstat-toolbox[dev] to rebuild docs search." 
+ return report + + proc = subprocess.run( + [sys.executable, "-m", "sphinx", "-q", "-b", "html", "docs", str(output_dir)], + cwd=repo_root, + check=False, + capture_output=True, + text=True, + ) + if proc.returncode != 0: + report["status"] = "failed" + report["error"] = proc.stderr.strip() or proc.stdout.strip() or "Unknown sphinx build failure." + return report + + report["status"] = "rebuilt" + report["is_available"] = search_index.exists() + if not search_index.exists(): + report["status"] = "failed" + report["error"] = f"Expected search index was not created: {search_index}" + return report + + def nstat_install( *, rebuild_doc_search: bool = True, @@ -60,6 +103,11 @@ def nstat_install( "package_root": str(Path(__file__).resolve().parent), "rebuild_doc_search": bool(rebuild_doc_search), "clean_user_path_prefs": bool(clean_user_path_prefs), + "path_preferences": { + "requested": bool(clean_user_path_prefs), + "status": "not_applicable", + "reason": "Python packaging/import resolution replaces MATLAB user path preference cleanup.", + }, "download_example_data": mode, "example_data": { "data_dir": str(data_dir), @@ -71,6 +119,25 @@ def nstat_install( "notes": [], } + if rebuild_doc_search: + doc_search = _rebuild_doc_search(repo_root) + report["doc_search"] = doc_search + if doc_search["status"] == "rebuilt": + report["notes"].append("Rebuilt docs HTML search index.") + elif doc_search["status"] == "skipped": + report["notes"].append(f"Docs search rebuild skipped: {doc_search['reason']}") + else: + report["notes"].append("Docs search rebuild failed.") + else: + report["doc_search"] = { + "requested": False, + "status": "skipped", + "reason": "Disabled by caller.", + } + + if clean_user_path_prefs: + report["notes"].append("Clean user path preferences is a MATLAB-only option and is ignored in Python.") + try: if info.is_installed: report["example_data"]["is_installed"] = True @@ -106,7 +173,11 @@ def main() -> int: help="true/false or always/prompt/never", ) 
parser.add_argument("--no-rebuild-doc-search", action="store_true") - parser.add_argument("--clean-user-path-prefs", action="store_true") + parser.add_argument( + "--clean-user-path-prefs", + action="store_true", + help="MATLAB-compatibility no-op; Python does not maintain MATLAB-style user path preferences.", + ) args = parser.parse_args() raw_mode: str | bool @@ -124,4 +195,10 @@ def main() -> int: download_example_data=raw_mode, ) print(json.dumps(report, indent=2)) - return 0 if "error" not in report.get("example_data", {}) else 1 + example_data_failed = "error" in report.get("example_data", {}) + doc_search_failed = report.get("doc_search", {}).get("status") == "failed" + return 0 if not (example_data_failed or doc_search_failed) else 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/nstat/paper_example_catalog.py b/nstat/paper_example_catalog.py new file mode 100644 index 00000000..e030e245 --- /dev/null +++ b/nstat/paper_example_catalog.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import argparse +import json +from pathlib import Path +from typing import Any + +from .data_manager import ensure_example_data +from .paper_figures import default_export_dir, export_named_paper_figures +from .paper_examples_full import ( + _default_repo_root, + run_experiment1, + run_experiment2, + run_experiment3, + run_experiment3b, + run_experiment4, + run_experiment5, + run_experiment5b, + run_experiment6, +) + + +def run_named_paper_example( + example_id: str, repo_root: Path, *, return_payload: bool = False +) -> dict[str, dict[str, float]] | tuple[dict[str, dict[str, float]], dict[str, dict[str, object]]]: + repo_root = repo_root.resolve() + data_dir = ensure_example_data(download=True) + + if example_id == "example01": + if not return_payload: + return {"experiment1": run_experiment1(data_dir)} + summary, payload = run_experiment1(data_dir, return_payload=True) + return {"experiment1": summary}, {"experiment1": payload} + if 
example_id == "example02": + if not return_payload: + return {"experiment2": run_experiment2(data_dir)} + summary, payload = run_experiment2(data_dir, return_payload=True) + return {"experiment2": summary}, {"experiment2": payload} + if example_id == "example03": + if not return_payload: + return { + "experiment3": run_experiment3(), + "experiment3b": run_experiment3b(data_dir), + } + summary3, payload3 = run_experiment3(return_payload=True) + summary3b, payload3b = run_experiment3b(data_dir, return_payload=True) + return { + "experiment3": summary3, + "experiment3b": summary3b, + }, { + "experiment3": payload3, + "experiment3b": payload3b, + } + if example_id == "example04": + if not return_payload: + return {"experiment4": run_experiment4(data_dir)} + summary4, payload4 = run_experiment4(data_dir, return_payload=True) + return {"experiment4": summary4}, {"experiment4": payload4} + if example_id == "example05": + if not return_payload: + return { + "experiment5": run_experiment5(), + "experiment5b": run_experiment5b(), + "experiment6": run_experiment6(repo_root), + } + summary5, payload5 = run_experiment5(return_payload=True) + summary5b, payload5b = run_experiment5b(return_payload=True) + summary6, payload6 = run_experiment6(repo_root, return_payload=True) + return { + "experiment5": summary5, + "experiment5b": summary5b, + "experiment6": summary6, + }, { + "experiment5": payload5, + "experiment5b": payload5b, + "experiment6": payload6, + } + raise ValueError(f"Unknown paper example id: {example_id}") + + +def main_for(example_id: str) -> int: + parser = argparse.ArgumentParser(description=f"Run canonical nSTAT Python paper example {example_id}") + parser.add_argument("--repo-root", type=Path, default=_default_repo_root()) + parser.add_argument("--output-json", type=Path, default=None) + parser.add_argument("--export-figures", action="store_true", help="Export canonical figure files for this example.") + parser.add_argument("--export-dir", type=Path, 
default=None, help="Destination directory for exported figure files.") + args = parser.parse_args() + + if args.export_figures: + results, payloads = run_named_paper_example(example_id, args.repo_root, return_payload=True) + export_dir = (args.export_dir or default_export_dir(args.repo_root, example_id)).resolve() + if len(results) == 1: + section_name = next(iter(results)) + figure_summary = results[section_name] + figure_payload = payloads[section_name] + else: + figure_summary = results + figure_payload = payloads + saved_paths = export_named_paper_figures( + example_id, + summary=figure_summary, + payload=figure_payload, + export_dir=export_dir, + ) + else: + results = run_named_paper_example(example_id, args.repo_root) + saved_paths = [] + + if args.output_json is not None: + args.output_json.write_text(json.dumps(results, indent=2), encoding="utf-8") + print(json.dumps(results, indent=2)) + if saved_paths: + print("\nGenerated figures:") + for path in saved_paths: + print(str(path)) + return 0 + + +__all__ = ["main_for", "run_named_paper_example"] diff --git a/nstat/paper_examples_full.py b/nstat/paper_examples_full.py index 46fd24d3..815f57e4 100644 --- a/nstat/paper_examples_full.py +++ b/nstat/paper_examples_full.py @@ -7,6 +7,7 @@ import numpy as np from scipy.io import loadmat +from scipy.signal import correlate, correlation_lags from .analysis import psth from .data_manager import ensure_example_data @@ -63,6 +64,79 @@ def _history_matrix(y: np.ndarray, lags: list[int]) -> np.ndarray: return out +def _autocorrelation(values: np.ndarray, max_lag: int) -> tuple[np.ndarray, np.ndarray]: + centered = np.asarray(values, dtype=float).reshape(-1) - float(np.mean(values)) + if centered.size < 2 or float(np.var(centered)) <= 0.0: + return np.asarray([], dtype=float), np.asarray([], dtype=float) + corr = np.correlate(centered, centered, mode="full") + corr = corr[corr.size // 2 :] + corr = corr / corr[0] + lags = np.arange(corr.shape[0], dtype=float) + max_lag = 
int(min(max_lag, corr.shape[0] - 1)) + return lags[1 : max_lag + 1], corr[1 : max_lag + 1] + + +def _time_rescaled_uniforms(y: np.ndarray, lam_per_bin: np.ndarray) -> np.ndarray: + y_arr = np.asarray(y, dtype=float).reshape(-1) + lam = np.asarray(lam_per_bin, dtype=float).reshape(-1) + if y_arr.shape != lam.shape: + raise ValueError("y and lam_per_bin must have the same shape") + if np.sum(y_arr) <= 1: + return np.asarray([], dtype=float) + + uniforms: list[float] = [] + accum = 0.0 + for count, lam_i in zip(y_arr, lam, strict=False): + accum += float(max(lam_i, 1e-12)) + if count >= 1.0: + for _ in range(int(round(count))): + uniforms.append(1.0 - np.exp(-accum)) + accum = 0.0 + return np.asarray(uniforms, dtype=float) + + +def _ks_curve(uniforms: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + u = np.sort(np.asarray(uniforms, dtype=float).reshape(-1)) + if u.size == 0: + return np.asarray([], dtype=float), np.asarray([], dtype=float), np.asarray([], dtype=float) + ideal = np.linspace(1.0 / u.size, 1.0, u.size, dtype=float) + ci = np.full(u.size, 1.36 / np.sqrt(float(u.size)), dtype=float) + return ideal, u, ci + + +def _binned_series(time_s: np.ndarray, values: np.ndarray, bin_width_s: float) -> tuple[np.ndarray, np.ndarray]: + time_arr = np.asarray(time_s, dtype=float).reshape(-1) + values_arr = np.asarray(values, dtype=float).reshape(-1) + if time_arr.shape != values_arr.shape: + raise ValueError("time_s and values must have the same shape") + if time_arr.size < 2: + return time_arr.copy(), values_arr.copy() + dt = float(np.median(np.diff(time_arr))) + samples_per_bin = max(int(round(bin_width_s / max(dt, 1e-12))), 1) + n_bins = values_arr.shape[0] // samples_per_bin + trimmed_values = values_arr[: n_bins * samples_per_bin] + trimmed_time = time_arr[: n_bins * samples_per_bin] + if n_bins == 0: + return time_arr.copy(), values_arr.copy() + binned_values = trimmed_values.reshape(n_bins, samples_per_bin).mean(axis=1) + binned_time = 
trimmed_time.reshape(n_bins, samples_per_bin)[:, 0] + return binned_time, binned_values + + +def _coefficient_intervals(x: np.ndarray, result, offset: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]: + x_arr = np.asarray(x, dtype=float) + offset_arr = np.asarray(offset, dtype=float).reshape(-1) + x_aug = np.column_stack([np.ones(x_arr.shape[0], dtype=float), x_arr]) + beta = np.concatenate([[result.intercept], np.asarray(result.coefficients, dtype=float)]) + lam = np.exp(np.clip(x_aug @ beta + offset_arr, -20.0, 20.0)) + hess = x_aug.T @ (lam[:, None] * x_aug) + cov = np.linalg.pinv(hess) + se = np.sqrt(np.clip(np.diag(cov), 0.0, None)) + lower = beta - 1.96 * se + upper = beta + 1.96 * se + return beta, lower, upper + + def _load_mepsc_times_seconds(path: Path) -> np.ndarray: arr = np.loadtxt(path, skiprows=1) return np.asarray(arr[:, 1], dtype=float).reshape(-1) / 1000.0 @@ -75,17 +149,23 @@ def _bin_spike_times(spikes: np.ndarray, t0: float, t1: float, dt: float) -> tup return edges[:-1], counts.astype(float) -def run_experiment1(data_dir: Path) -> dict[str, float]: +def run_experiment1(data_dir: Path, *, return_payload: bool = False) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: mepsc_dir = data_dir / "mEPSCs" epsc2 = _load_mepsc_times_seconds(mepsc_dir / "epsc2.txt") washout1 = _load_mepsc_times_seconds(mepsc_dir / "washout1.txt") washout2 = _load_mepsc_times_seconds(mepsc_dir / "washout2.txt") dt_const = 0.01 - _, y_const = _bin_spike_times(epsc2, 0.0, float(np.max(epsc2)), dt_const) + t_const, y_const = _bin_spike_times(epsc2, 0.0, float(np.max(epsc2)), dt_const) off_const = np.full(y_const.shape[0], np.log(dt_const), dtype=float) m_const = fit_poisson_glm(np.zeros((y_const.shape[0], 0), dtype=float), y_const, offset=off_const, max_iter=80) aic_const, bic_const = _aic_bic(m_const.log_likelihood, y_const.shape[0], 1) + lam_const = m_const.predict_rate(np.zeros((y_const.shape[0], 0), dtype=float), offset=off_const) + rate_const_hz 
= lam_const / dt_const + const_uniforms = _time_rescaled_uniforms(y_const, lam_const) + const_ideal, const_ks, const_ks_ci = _ks_curve(const_uniforms) + const_acf_lags, const_acf = _autocorrelation(const_uniforms, max_lag=580) + const_acf_ci = 1.96 / np.sqrt(max(const_uniforms.shape[0], 1)) spikes = np.concatenate([260.0 + washout1, np.sort(washout2) + 745.0]) dt = 0.01 @@ -102,11 +182,17 @@ def run_experiment1(data_dir: Path) -> dict[str, float]: m_piece_hist = fit_poisson_glm(x_piece_hist, y, offset=off, max_iter=120) aic_piece, bic_piece = _aic_bic(m_piece.log_likelihood, y.shape[0], x_piece.shape[1] + 1) aic_piece_hist, bic_piece_hist = _aic_bic(m_piece_hist.log_likelihood, y.shape[0], x_piece_hist.shape[1] + 1) + lam_piece = m_piece.predict_rate(x_piece, offset=off) + lam_piece_hist = m_piece_hist.predict_rate(x_piece_hist, offset=off) + time_binned, obs_rate_hz = _binned_series(t, y / dt, 2.0) + _, piece_rate_hz = _binned_series(t, lam_piece / dt, 2.0) + _, piece_hist_rate_hz = _binned_series(t, lam_piece_hist / dt, 2.0) - return { + summary = { "const_condition_spikes": float(np.sum(y_const)), "const_model_aic": aic_const, "const_model_bic": bic_const, + "constant_acf_ci": float(const_acf_ci), "decreasing_condition_spikes": float(np.sum(y)), "piecewise_model_aic": aic_piece, "piecewise_model_bic": bic_piece, @@ -114,9 +200,31 @@ def run_experiment1(data_dir: Path) -> dict[str, float]: "piecewise_history_model_bic": bic_piece_hist, "dt_seconds": dt, } + if not return_payload: + return summary + + payload: dict[str, object] = { + "constant_spike_times_s": epsc2, + "constant_time_s": t_const, + "constant_rate_hz": rate_const_hz, + "constant_acf_lags_s": const_acf_lags, + "constant_acf_values": const_acf, + "constant_ks_ideal": const_ideal, + "constant_ks_empirical": const_ks, + "constant_ks_ci": const_ks_ci, + "constant_window_s": np.asarray([0.0, float(np.max(epsc2))], dtype=float), + "washout_spike_times_s": spikes, + "washout_window_s": np.asarray([260.0, 
float(np.max(spikes))], dtype=float), + "washout_time_s": time_binned, + "washout_observed_rate_hz": obs_rate_hz, + "washout_piecewise_rate_hz": piece_rate_hz, + "washout_piecewise_history_rate_hz": piece_hist_rate_hz, + "washout_segment_edges_s": np.asarray([260.0, 495.0, 765.0, float(np.max(spikes))], dtype=float), + } + return summary, payload -def run_experiment2(data_dir: Path) -> dict[str, float]: +def run_experiment2(data_dir: Path, *, return_payload: bool = False) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: path = data_dir / "Explicit Stimulus" / "Dir3" / "Neuron1" / "Stim2" / "trngdataBis.mat" d = _loadmat_checked(path) if d is None: @@ -147,8 +255,61 @@ def run_experiment2(data_dir: Path) -> dict[str, float]: aic1, bic1 = _aic_bic(m1.log_likelihood, y.shape[0], 1) aic2, bic2 = _aic_bic(m2.log_likelihood, y.shape[0], 3) aic3, bic3 = _aic_bic(m3.log_likelihood, y.shape[0], 8) - - return { + lam1 = m1.predict_rate(x1, offset=offset) + lam2 = m2.predict_rate(x2, offset=offset) + lam3 = m3.predict_rate(x3, offset=offset) + + u1 = _time_rescaled_uniforms(y, lam1) + u2 = _time_rescaled_uniforms(y, lam2) + u3 = _time_rescaled_uniforms(y, lam3) + ks_ideal, ks1_emp, ks_ci = _ks_curve(u1) + _, ks2_emp, _ = _ks_curve(u2) + _, ks3_emp, _ = _ks_curve(u3) + + selection_n = int(min(6000, y.shape[0])) + candidate_q = np.arange(1, 29, dtype=int) + ks_stats = np.zeros(candidate_q.shape[0], dtype=float) + aic_series = np.zeros(candidate_q.shape[0], dtype=float) + bic_series = np.zeros(candidate_q.shape[0], dtype=float) + y_sel = y[:selection_n] + stim_sel = stim[:selection_n] + stim_vel_sel = stim_vel[:selection_n] + offset_sel = offset[:selection_n] + full_hist = _history_matrix(y_sel, list(range(1, int(candidate_q[-1]) + 1))) + for idx, q in enumerate(candidate_q.tolist()): + x_sel = np.column_stack([stim_sel, stim_vel_sel, full_hist[:, :q]]) + model_q = fit_poisson_glm(x_sel, y_sel, offset=offset_sel, max_iter=45) + lam_q = 
model_q.predict_rate(x_sel, offset=offset_sel) + uq = _time_rescaled_uniforms(y_sel, lam_q) + ideal_q, emp_q, _ = _ks_curve(uq) + ks_stats[idx] = float(np.max(np.abs(emp_q - ideal_q))) if ideal_q.size else 0.0 + aic_q, bic_q = _aic_bic(model_q.log_likelihood, y_sel.shape[0], x_sel.shape[1] + 1) + aic_series[idx] = aic_q + bic_series[idx] = bic_q + + delta_aic = aic_series - aic_series[0] + delta_bic = bic_series - bic_series[0] + + xcorr_window = int(min(20000, y.shape[0])) + xcorr = correlate(y[:xcorr_window] - np.mean(y[:xcorr_window]), stim[:xcorr_window] - np.mean(stim[:xcorr_window]), mode="full", method="fft") + lags = correlation_lags(xcorr_window, xcorr_window, mode="full") + keep = np.abs(lags) <= 1000 + xcorr = xcorr[keep] + lags_s = lags[keep] * dt + positive = lags_s >= 0.0 + lags_s = lags_s[positive] + xcorr = xcorr[positive] + peak_idx = int(np.argmax(xcorr)) + peak_lag_s = float(lags_s[peak_idx]) + + coef_beta, coef_lower, coef_upper = _coefficient_intervals(x3, m3, offset) + coef_names = [f"[{i*dt:.3f},{(i+1)*dt:.3f}]" for i in range(hist.shape[1])] + coef_names.extend(["μ", "stim"]) + coef_values = np.concatenate([coef_beta[3:], [coef_beta[0], coef_beta[1]]]) + coef_lower_plot = np.concatenate([coef_lower[3:], [coef_lower[0], coef_lower[1]]]) + coef_upper_plot = np.concatenate([coef_upper[3:], [coef_upper[0], coef_upper[1]]]) + + summary = { "n_samples": float(y.shape[0]), "model1_aic": aic1, "model2_aic": aic2, @@ -156,10 +317,37 @@ def run_experiment2(data_dir: Path) -> dict[str, float]: "model1_bic": bic1, "model2_bic": bic2, "model3_bic": bic3, + "peak_lag_seconds": peak_lag_s, + } + if not return_payload: + return summary + + view_n = int(min(int(round(21.0 / dt)), y.shape[0])) + payload = { + "time_s": np.arange(view_n, dtype=float) * dt, + "spike_indicator": y[:view_n], + "stimulus": stim[:view_n], + "velocity": stim_vel[:view_n], + "xcorr_lags_s": lags_s, + "xcorr_values": xcorr, + "history_windows": candidate_q.astype(float), + "ks_stats": 
ks_stats, + "delta_aic": delta_aic, + "delta_bic": delta_bic, + "ks_ideal": ks_ideal, + "ks_const_empirical": ks1_emp, + "ks_stim_empirical": ks2_emp, + "ks_hist_empirical": ks3_emp, + "ks_ci": ks_ci, + "coef_names": coef_names, + "coef_values": coef_values, + "coef_lower": coef_lower_plot, + "coef_upper": coef_upper_plot, } + return summary, payload -def run_experiment3(seed: int = 7) -> dict[str, float]: +def run_experiment3(seed: int = 7, *, return_payload: bool = False) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: rng = np.random.default_rng(seed) dt = 0.001 tmax = 1.0 @@ -173,16 +361,26 @@ def run_experiment3(seed: int = 7) -> dict[str, float]: trains = [simulate_poisson_from_rate(time, rate_hz, rng=rng) for _ in range(20)] edges = np.arange(0.0, tmax + 0.05, 0.05) psth_rate_hz, counts = psth(trains, edges) - - return { + summary = { "num_trials": float(len(trains)), "psth_peak_hz": float(np.max(psth_rate_hz)), "psth_mean_hz": float(np.mean(psth_rate_hz)), "total_spikes": float(np.sum(counts)), } + if not return_payload: + return summary + + payload = { + "time_s": time, + "true_rate_hz": rate_hz, + "psth_bin_centers_s": 0.5 * (edges[:-1] + edges[1:]), + "psth_rate_hz": psth_rate_hz, + "raster_spike_times": [np.asarray(train.spikeTimes, dtype=float) for train in trains], + } + return summary, payload -def run_experiment3b(data_dir: Path) -> dict[str, float]: +def run_experiment3b(data_dir: Path, *, return_payload: bool = False) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: path = data_dir / "SSGLMExampleData.mat" d = _loadmat_checked(path) if d is None: @@ -192,20 +390,23 @@ def run_experiment3b(data_dir: Path) -> dict[str, float]: ci_half = np.abs(rng.normal(0.35, 0.08, size=stimulus.shape)) stim_cis = np.stack([xk - ci_half, xk + ci_half], axis=-1) qhat = np.abs(rng.normal(0.12, 0.03, size=stimulus.shape[0])) - gammahat = np.abs(rng.normal(0.08, 0.02, size=stimulus.shape[0])) + qhat_all = np.tile(qhat[:, None], (1, 
8)) + gammahat = np.abs(rng.normal(0.08, 0.02, size=3)) + gammahat_all = np.linspace(0.05, 0.11, 8, dtype=float) logll = float(-np.mean((xk - stimulus) ** 2) * stimulus.size) else: stimulus = np.asarray(d["stimulus"], dtype=float) xk = np.asarray(d["xK"], dtype=float) stim_cis = np.asarray(d["stimCIs"], dtype=float) qhat = np.asarray(d["Qhat"], dtype=float).reshape(-1) + qhat_all = np.asarray(d.get("QhatAll", np.tile(qhat[:, None], (1, 8))), dtype=float) gammahat = np.asarray(d["gammahat"], dtype=float).reshape(-1) + gammahat_all = np.asarray(d.get("gammahatAll", gammahat), dtype=float).reshape(-1) logll = float(np.asarray(d["logll"], dtype=float).reshape(-1)[0]) coverage = np.mean((stimulus >= stim_cis[:, :, 0]) & (stimulus <= stim_cis[:, :, 1])) rmse = np.sqrt(np.mean((xk - stimulus) ** 2)) - - return { + summary = { "num_trials": float(stimulus.shape[0]), "num_time_bins": float(stimulus.shape[1]), "state_rmse": float(rmse), @@ -214,6 +415,20 @@ def run_experiment3b(data_dir: Path) -> dict[str, float]: "mean_gammahat": float(np.mean(gammahat)), "log_likelihood": logll, } + if not return_payload: + return summary + + payload = { + "stimulus": stimulus, + "xk": xk, + "stim_cis": stim_cis, + "qhat": qhat, + "qhat_all": qhat_all, + "gammahat": gammahat, + "gammahat_all": gammahat_all, + "ci_width": stim_cis[:, :, 1] - stim_cis[:, :, 0], + } + return summary, payload def _spike_indicator(time: np.ndarray, spike_times: np.ndarray) -> np.ndarray: @@ -242,8 +457,7 @@ def _zernike_like_basis(x: np.ndarray, y: np.ndarray) -> np.ndarray: ]) -def run_experiment4(data_dir: Path) -> dict[str, float]: - path = data_dir / "Place Cells" / "PlaceCellDataAnimal1.mat" +def _load_placecell_dataset(path: Path): d = _loadmat_checked(path) if d is None: rng = np.random.default_rng(4004) @@ -263,33 +477,98 @@ def run_experiment4(data_dir: Path) -> dict[str, float]: y = np.asarray(d["y"], dtype=float).reshape(-1) time = np.asarray(d["time"], dtype=float).reshape(-1) neurons = 
np.asarray(d["neuron"], dtype=object).reshape(-1) + return x, y, time, neurons + +def _evaluate_place_models(x: np.ndarray, y: np.ndarray, time: np.ndarray, neurons: np.ndarray, selected_indices: list[int]): dt = float(np.median(np.diff(time))) offset = np.full(time.shape[0], np.log(max(dt, 1e-12)), dtype=float) x_gauss = np.column_stack([x, y, x * x, y * y, x * y]) x_zern = _zernike_like_basis(x, y) - d_aic = [] - d_bic = [] - n_eval = int(min(8, neurons.shape[0])) - for i in range(n_eval): - spike_times = np.asarray(neurons[i].spikeTimes, dtype=float).reshape(-1) + x_grid = np.linspace(float(np.min(x)), float(np.max(x)), 40, dtype=float) + y_grid = np.linspace(float(np.min(y)), float(np.max(y)), 40, dtype=float) + xx, yy = np.meshgrid(x_grid, y_grid) + grid_gauss = np.column_stack([xx.ravel(), yy.ravel(), xx.ravel() ** 2, yy.ravel() ** 2, xx.ravel() * yy.ravel()]) + grid_zern = _zernike_like_basis(xx.ravel(), yy.ravel()) + + d_aic: list[float] = [] + d_bic: list[float] = [] + gaussian_fields: list[np.ndarray] = [] + zernike_fields: list[np.ndarray] = [] + spike_times_all: list[np.ndarray] = [] + + for idx in selected_indices: + spike_times = np.asarray(neurons[idx].spikeTimes, dtype=float).reshape(-1) y_spike = _spike_indicator(time, spike_times) - mg = fit_poisson_glm(x_gauss, y_spike, offset=offset) - mz = fit_poisson_glm(x_zern, y_spike, offset=offset) + mg = fit_poisson_glm(x_gauss, y_spike, offset=offset, max_iter=70) + mz = fit_poisson_glm(x_zern, y_spike, offset=offset, max_iter=70) aicg, bicg = _aic_bic(mg.log_likelihood, y_spike.shape[0], x_gauss.shape[1] + 1) aicz, bicz = _aic_bic(mz.log_likelihood, y_spike.shape[0], x_zern.shape[1] + 1) d_aic.append(aicg - aicz) d_bic.append(bicg - bicz) + gaussian_fields.append(mg.predict_rate(grid_gauss).reshape(xx.shape)) + zernike_fields.append(mz.predict_rate(grid_zern).reshape(xx.shape)) + spike_times_all.append(spike_times) return { - "num_cells_fit": float(n_eval), - "mean_delta_aic_gaussian_minus_zernike": 
float(np.mean(d_aic)), - "mean_delta_bic_gaussian_minus_zernike": float(np.mean(d_bic)), + "time_s": time, + "x_pos": x, + "y_pos": y, + "selected_indices": np.asarray(selected_indices, dtype=int), + "delta_aic": np.asarray(d_aic, dtype=float), + "delta_bic": np.asarray(d_bic, dtype=float), + "gaussian_fields": np.asarray(gaussian_fields, dtype=float), + "zernike_fields": np.asarray(zernike_fields, dtype=float), + "spike_times": spike_times_all, + "grid_x": xx, + "grid_y": yy, + } + + +def run_experiment4(data_dir: Path, *, return_payload: bool = False) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: + path1 = data_dir / "Place Cells" / "PlaceCellDataAnimal1.mat" + x1, y1, time1, neurons1 = _load_placecell_dataset(path1) + + path2 = data_dir / "Place Cells" / "PlaceCellDataAnimal2.mat" + x2, y2, time2, neurons2 = _load_placecell_dataset(path2) + + selected1 = [0, 1, min(2, len(neurons1) - 1), min(24, len(neurons1) - 1)] + selected2 = [0, 1, min(2, len(neurons2) - 1), min(24, len(neurons2) - 1)] + animal1 = _evaluate_place_models(x1, y1, time1, neurons1, selected1) + animal2 = _evaluate_place_models(x2, y2, time2, neurons2, selected2) + + summary = { + "num_cells_fit": float(len(selected1) + len(selected2)), + "mean_delta_aic_gaussian_minus_zernike": float(np.mean(np.concatenate([animal1["delta_aic"], animal2["delta_aic"]]))), + "mean_delta_bic_gaussian_minus_zernike": float(np.mean(np.concatenate([animal1["delta_bic"], animal2["delta_bic"]]))), + } + if not return_payload: + return summary + + mesh_idx = int(selected1[-1]) + mesh_spike_times = np.asarray(neurons1[mesh_idx].spikeTimes, dtype=float).reshape(-1) + payload = { + "animal1": animal1, + "animal2": animal2, + "mesh": { + "cell_index": mesh_idx, + "gaussian_field": animal1["gaussian_fields"][-1], + "zernike_field": animal1["zernike_fields"][-1], + "grid_x": animal1["grid_x"], + "grid_y": animal1["grid_y"], + "x_pos": x1, + "y_pos": y1, + "spike_times": mesh_spike_times, + "time_s": time1, 
+ }, } + return summary, payload -def run_experiment5(seed: int = 11, n_cells: int = 20) -> dict[str, float]: +def run_experiment5( + seed: int = 11, n_cells: int = 20, *, return_payload: bool = False +) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: rng = np.random.default_rng(seed) dt = 0.001 time = np.arange(0.0, 1.0 + dt, dt) @@ -309,10 +588,23 @@ def run_experiment5(seed: int = 11, n_cells: int = 20) -> dict[str, float]: decoded = DecodingAlgorithms.linear_decode(spikes, stim) rmse = float(np.sqrt(np.mean((decoded["decoded"] - stim) ** 2))) - return {"num_cells": float(n_cells), "decode_rmse": rmse} + summary = {"num_cells": float(n_cells), "decode_rmse": rmse} + if not return_payload: + return summary + payload = { + "time_s": time, + "stimulus": stim, + "spikes": spikes, + "decoded": np.asarray(decoded["decoded"], dtype=float), + "ci_low": np.asarray(decoded["ci"][:, 0], dtype=float), + "ci_high": np.asarray(decoded["ci"][:, 1], dtype=float), + } + return summary, payload -def run_experiment5b(seed: int = 19, n_cells: int = 30) -> dict[str, float]: +def run_experiment5b( + seed: int = 19, n_cells: int = 30, *, return_payload: bool = False +) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: rng = np.random.default_rng(seed) dt = 0.01 @@ -334,14 +626,32 @@ def run_experiment5b(seed: int = 19, n_cells: int = 30) -> dict[str, float]: p = 1.0 / (1.0 + np.exp(-np.clip(eta, -20.0, 20.0))) spikes[:, i] = (rng.random(time.shape[0]) < p).astype(float) - dx = DecodingAlgorithms.linear_decode(spikes, x_true)["decoded"] - dy = DecodingAlgorithms.linear_decode(spikes, y_true)["decoded"] - return { + goal_cells = max(n_cells // 2, 1) + dx_goal = DecodingAlgorithms.linear_decode(spikes[:, :goal_cells], x_true)["decoded"] + dy_goal = DecodingAlgorithms.linear_decode(spikes[:, :goal_cells], y_true)["decoded"] + dx_free = DecodingAlgorithms.linear_decode(spikes, x_true)["decoded"] + dy_free = DecodingAlgorithms.linear_decode(spikes, 
y_true)["decoded"] + summary = { "num_cells": float(n_cells), "num_samples": float(time.shape[0]), - "decode_rmse_x": float(np.sqrt(np.mean((dx - x_true) ** 2))), - "decode_rmse_y": float(np.sqrt(np.mean((dy - y_true) ** 2))), + "decode_rmse_x": float(np.sqrt(np.mean((dx_free - x_true) ** 2))), + "decode_rmse_y": float(np.sqrt(np.mean((dy_free - y_true) ** 2))), } + if not return_payload: + return summary + payload = { + "time_s": time, + "x_true": x_true, + "y_true": y_true, + "vx_true": vx, + "vy_true": vy, + "spikes": spikes, + "dx_goal": np.asarray(dx_goal, dtype=float), + "dy_goal": np.asarray(dy_goal, dtype=float), + "dx_free": np.asarray(dx_free, dtype=float), + "dy_free": np.asarray(dy_free, dtype=float), + } + return summary, payload def _simulate_hybrid_spikes(x: np.ndarray, mstate: np.ndarray, dt: float, n_cells: int, seed: int): @@ -387,7 +697,9 @@ def _hybrid_state_filter(spikes: np.ndarray, x: np.ndarray, dt: float, p_ij: np. return post -def run_experiment6(repo_root: Path, seed: int = 37) -> dict[str, float]: +def run_experiment6( + repo_root: Path, seed: int = 37, *, return_payload: bool = False +) -> dict[str, float] | tuple[dict[str, float], dict[str, object]]: del repo_root rng = np.random.default_rng(seed) dt = 0.01 @@ -411,13 +723,30 @@ def run_experiment6(repo_root: Path, seed: int = 37) -> dict[str, float]: dx = DecodingAlgorithms.linear_decode(spikes, x[0, :])["decoded"] dy = DecodingAlgorithms.linear_decode(spikes, x[1, :])["decoded"] - return { + summary = { "num_samples": float(x.shape[1]), "num_cells": float(n_cells), "state_accuracy": float(state_acc), "decode_rmse_x": float(np.sqrt(np.mean((dx - x[0, :]) ** 2))), "decode_rmse_y": float(np.sqrt(np.mean((dy - x[1, :]) ** 2))), } + if not return_payload: + return summary + payload = { + "time_s": t, + "x_pos": x[0, :], + "y_pos": x[1, :], + "x_vel": x[2, :], + "y_vel": x[3, :], + "state_true": mstate.astype(float), + "state_hat": state_hat.astype(float), + "state_prob_1": post[:, 0], + 
"state_prob_2": post[:, 1], + "decoded_x": np.asarray(dx, dtype=float), + "decoded_y": np.asarray(dy, dtype=float), + "spikes": spikes, + } + return summary, payload def run_full_paper_examples(repo_root: Path) -> dict[str, dict[str, float]]: diff --git a/nstat/paper_figures.py b/nstat/paper_figures.py new file mode 100644 index 00000000..18c579b2 --- /dev/null +++ b/nstat/paper_figures.py @@ -0,0 +1,815 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Callable + +import matplotlib + +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.mplot3d import Axes3D # noqa: F401 + + +FigureBuilder = Callable[[dict[str, object], dict[str, object]], plt.Figure] + + +def default_export_dir(repo_root: Path, example_id: str) -> Path: + return repo_root.resolve() / "docs" / "figures" / example_id + + +def save_figure(fig: plt.Figure, path: Path) -> Path: + path.parent.mkdir(parents=True, exist_ok=True) + fig.savefig(path, dpi=180, bbox_inches="tight") + plt.close(fig) + return path + + +def export_figure_set( + *, + summary: dict[str, float], + payload: dict[str, object], + export_dir: Path, + builders: list[tuple[str, FigureBuilder]], +) -> list[Path]: + written: list[Path] = [] + export_dir.mkdir(parents=True, exist_ok=True) + for filename, builder in builders: + fig = builder(summary, payload) + written.append(save_figure(fig, export_dir / filename)) + return written + + +def _coerce_array(payload: dict[str, object], key: str) -> np.ndarray: + return np.asarray(payload[key], dtype=float) + + +def build_example01_constant_mg_summary(summary: dict[str, float], payload: dict[str, object]) -> plt.Figure: + time_s = _coerce_array(payload, "constant_time_s") + rate_hz = _coerce_array(payload, "constant_rate_hz") + acf_lags_s = _coerce_array(payload, "constant_acf_lags_s") + acf_values = _coerce_array(payload, "constant_acf_values") + acf_ci = float(summary["constant_acf_ci"]) + ks_ideal = 
_coerce_array(payload, "constant_ks_ideal") + ks_empirical = _coerce_array(payload, "constant_ks_empirical") + ks_ci = _coerce_array(payload, "constant_ks_ci") + raster_spikes = _coerce_array(payload, "constant_spike_times_s") + + fig, axes = plt.subplots(2, 2, figsize=(11.5, 8.0)) + ax_raster, ax_acf, ax_ks, ax_rate = axes.ravel() + + ax_raster.vlines(raster_spikes, 0.0, 1.0, color="black", linewidth=0.55) + ax_raster.set_xlim(float(time_s[0]), float(time_s[-1])) + ax_raster.set_ylim(0.0, 1.0) + ax_raster.set_title("Neural Raster with constant Mg$^{2+}$ Concentration") + ax_raster.set_xlabel("time [s]") + ax_raster.set_ylabel("mEPSCs") + + ax_acf.scatter(acf_lags_s, acf_values, s=6, color="tab:blue", alpha=0.85, linewidths=0.0) + ax_acf.axhline(acf_ci, color="red", linewidth=1.2) + ax_acf.axhline(-acf_ci, color="red", linewidth=1.2) + ax_acf.set_title("Autocorrelation Function\nof Rescaled ISIs with 95% CIs") + ax_acf.set_xlabel(r"$\Delta \tau$ [sec]") + ax_acf.set_ylabel(r"ACF[$\tilde{b}^{-1}(u)$]") + + ax_ks.plot(ks_ideal, ks_empirical, color="#8a2be2", linewidth=1.5) + ax_ks.plot([0.0, 1.0], [0.0, 1.0], color="black", linewidth=1.0, alpha=0.7) + ax_ks.plot(ks_ideal, np.clip(ks_ideal + ks_ci, 0.0, 1.0), color="red", linewidth=1.0, alpha=0.85) + ax_ks.plot(ks_ideal, np.clip(ks_ideal - ks_ci, 0.0, 1.0), color="red", linewidth=1.0, alpha=0.85) + ax_ks.set_xlim(0.0, 1.0) + ax_ks.set_ylim(0.0, 1.0) + ax_ks.set_title("KS Plot of Rescaled ISIs\nwith 95% Confidence Intervals") + ax_ks.set_xlabel("Ideal Uniform CDF") + ax_ks.set_ylabel("Empirical CDF") + + ax_rate.plot(time_s, rate_hz, color="tab:blue", linewidth=1.4) + ax_rate.set_xlim(float(time_s[0]), float(time_s[-1])) + ax_rate.set_title(r"$\lambda_{\mathrm{const}}$ baseline") + ax_rate.set_xlabel("time [s]") + ax_rate.set_ylabel(r"$\lambda(t)$ [Hz]") + + fig.tight_layout() + return fig + + +def build_example01_washout_raster(summary: dict[str, float], payload: dict[str, object]) -> plt.Figure: + const_spikes = 
_coerce_array(payload, "constant_spike_times_s") + washout_spikes = _coerce_array(payload, "washout_spike_times_s") + const_window = tuple(np.asarray(payload["constant_window_s"], dtype=float).tolist()) + washout_window = tuple(np.asarray(payload["washout_window_s"], dtype=float).tolist()) + + fig, (ax_const, ax_washout) = plt.subplots(2, 1, figsize=(11.0, 7.2), sharey=True) + + ax_const.vlines(const_spikes, 0.0, 1.0, color="black", linewidth=0.5) + ax_const.set_xlim(*const_window) + ax_const.set_ylim(0.0, 1.0) + ax_const.set_title("Neural Raster with constant Mg$^{2+}$ Concentration") + ax_const.set_ylabel("mEPSCs") + + ax_washout.vlines(washout_spikes, 0.0, 1.0, color="black", linewidth=0.45) + ax_washout.set_xlim(*washout_window) + ax_washout.set_ylim(0.0, 1.0) + ax_washout.set_title("Neural Raster with decreasing Mg$^{2+}$ Concentration") + ax_washout.set_xlabel("time [s]") + ax_washout.set_ylabel("mEPSCs") + + fig.tight_layout() + return fig + + +def build_example01_piecewise_baseline(summary: dict[str, float], payload: dict[str, object]) -> plt.Figure: + time_s = _coerce_array(payload, "washout_time_s") + observed_rate_hz = _coerce_array(payload, "washout_observed_rate_hz") + piecewise_rate_hz = _coerce_array(payload, "washout_piecewise_rate_hz") + piecewise_hist_rate_hz = _coerce_array(payload, "washout_piecewise_history_rate_hz") + segment_edges_s = _coerce_array(payload, "washout_segment_edges_s") + + fig, ax = plt.subplots(1, 1, figsize=(11.0, 4.8)) + ax.plot(time_s, observed_rate_hz, color="black", linewidth=1.0, alpha=0.45, label="Observed rate") + ax.plot(time_s, piecewise_rate_hz, color="tab:orange", linewidth=1.6, label="Piecewise baseline") + ax.plot(time_s, piecewise_hist_rate_hz, color="tab:red", linewidth=1.4, label="Piecewise + history") + for edge in segment_edges_s[1:-1]: + ax.axvline(float(edge), color="tab:blue", linestyle="--", linewidth=0.9, alpha=0.7) + ax.set_title("Piecewise Baseline Comparison During Mg$^{2+}$ Washout") + 
ax.set_xlabel("time [s]") + ax.set_ylabel("rate [Hz]") + ax.legend(loc="upper left") + ax.grid(alpha=0.25) + fig.tight_layout() + return fig + + +def build_example02_data_overview(summary: dict[str, float], payload: dict[str, object]) -> plt.Figure: + time_s = _coerce_array(payload, "time_s") + spike_indicator = _coerce_array(payload, "spike_indicator") + stimulus = _coerce_array(payload, "stimulus") + velocity = _coerce_array(payload, "velocity") + + fig, axes = plt.subplots(3, 1, figsize=(11.2, 7.8), sharex=True) + + spike_times = time_s[spike_indicator > 0.5] + axes[0].vlines(spike_times, 0.0, 1.0, color="black", linewidth=0.55) + axes[0].set_ylim(0.0, 1.0) + axes[0].set_title("Neural Raster") + axes[0].set_ylabel("spikes") + + axes[1].plot(time_s, stimulus, color="black", linewidth=0.8) + axes[1].set_title("Stimulus - Whisker Displacement") + axes[1].set_ylabel("Displacement [mm]") + + axes[2].plot(time_s, velocity, color="black", linewidth=0.8) + axes[2].axhline(0.0, color="0.75", linewidth=1.0) + axes[2].set_title("Displacement Velocity") + axes[2].set_ylabel("Velocity [mm/s]") + axes[2].set_xlabel("time [s]") + + fig.tight_layout() + return fig + + +def build_example02_model_comparison(summary: dict[str, float], payload: dict[str, object]) -> plt.Figure: + lags_s = _coerce_array(payload, "xcorr_lags_s") + xcorr_values = _coerce_array(payload, "xcorr_values") + peak_lag_s = float(summary["peak_lag_seconds"]) + history_windows = _coerce_array(payload, "history_windows") + ks_stats = _coerce_array(payload, "ks_stats") + delta_aic = _coerce_array(payload, "delta_aic") + delta_bic = _coerce_array(payload, "delta_bic") + ks_ideal = _coerce_array(payload, "ks_ideal") + ks_const = _coerce_array(payload, "ks_const_empirical") + ks_stim = _coerce_array(payload, "ks_stim_empirical") + ks_hist = _coerce_array(payload, "ks_hist_empirical") + ks_ci = _coerce_array(payload, "ks_ci") + coef_names = list(payload["coef_names"]) + coef_values = _coerce_array(payload, 
"coef_values") + coef_lower = _coerce_array(payload, "coef_lower") + coef_upper = _coerce_array(payload, "coef_upper") + + fig = plt.figure(figsize=(13.2, 8.2)) + gs = fig.add_gridspec(2, 2, width_ratios=[1.05, 1.0], height_ratios=[1.0, 1.0], hspace=0.5, wspace=0.32) + ax_xcorr = fig.add_subplot(gs[0, 0]) + right = gs[0, 1].subgridspec(3, 1, hspace=0.22) + ax_ks_stat = fig.add_subplot(right[0, 0]) + ax_aic = fig.add_subplot(right[1, 0], sharex=ax_ks_stat) + ax_bic = fig.add_subplot(right[2, 0], sharex=ax_ks_stat) + ax_ks = fig.add_subplot(gs[1, 0]) + ax_coef = fig.add_subplot(gs[1, 1]) + + ax_xcorr.plot(lags_s, xcorr_values, color="#6aa6d8", linewidth=1.1) + ax_xcorr.scatter([peak_lag_s], [float(np.interp(peak_lag_s, lags_s, xcorr_values))], color="red", s=35, zorder=3) + ax_xcorr.set_title(f"Cross Correlation Function - Peak at t={peak_lag_s:.3f} sec") + ax_xcorr.set_xlabel("Lag [s]") + ax_xcorr.set_ylabel("xcov") + + for ax, series, ylabel in ( + (ax_ks_stat, ks_stats, "KS statistic"), + (ax_aic, delta_aic, r"$\Delta$ AIC"), + (ax_bic, delta_bic, r"$\Delta$ BIC"), + ): + ax.plot(history_windows, series, color="#4c97d8", marker=".", linewidth=1.0, markersize=3) + idx = int(np.argmin(series)) + ax.scatter([history_windows[idx]], [series[idx]], color="red", marker="x", s=24, linewidths=1.0) + ax.set_ylabel(ylabel) + ax.grid(alpha=0.25) + ax_ks_stat.set_title("Model Selection via change\nin KS Statistic, AIC, and BIC") + ax_bic.set_xlabel("# History Windows, Q") + + ax_ks.plot(ks_ideal, ks_const, color="#7e2de1", linewidth=1.5, label=r"$\lambda_{\mathrm{const}}$") + ax_ks.plot(ks_ideal, ks_stim, color="tab:green", linewidth=1.3, label=r"$\lambda_{\mathrm{const+stim}}$") + ax_ks.plot(ks_ideal, ks_hist, color="#39bced", linewidth=1.3, label=r"$\lambda_{\mathrm{const+stim+hist}}$") + ax_ks.plot([0.0, 1.0], [0.0, 1.0], color="black", linewidth=1.0, alpha=0.7) + ax_ks.plot(ks_ideal, np.clip(ks_ideal + ks_ci, 0.0, 1.0), color="red", linewidth=1.0, alpha=0.85) + 
ax_ks.plot(ks_ideal, np.clip(ks_ideal - ks_ci, 0.0, 1.0), color="red", linewidth=1.0, alpha=0.85) + ax_ks.set_xlim(0.0, 1.0) + ax_ks.set_ylim(0.0, 1.0) + ax_ks.set_title("KS Plot of Rescaled ISIs\nwith 95% Confidence Intervals") + ax_ks.set_xlabel("Ideal Uniform CDF") + ax_ks.set_ylabel("Empirical CDF") + ax_ks.legend(loc="lower right", fontsize=8) + + positions = np.arange(len(coef_names), dtype=float) + yerr = np.vstack([coef_values - coef_lower, coef_upper - coef_values]) + ax_coef.errorbar(positions, coef_values, yerr=yerr, fmt="o", color="#f0ad2f", ecolor="#f0ad2f", capsize=2) + sig = (coef_lower > 0.0) | (coef_upper < 0.0) + ax_coef.scatter(positions[sig], np.full(np.sum(sig), np.max(coef_upper) * 1.05), color="red", marker="x", s=20, linewidths=0.8) + ax_coef.axhline(0.0, color="0.75", linewidth=1.0) + ax_coef.set_xticks(positions, coef_names, rotation=90) + ax_coef.set_title("GLM Coefficients with 95% CIs (* p<0.05)") + ax_coef.set_ylabel("GLM Fit Coefficients") + ax_coef.grid(alpha=0.25, axis="y") + + return fig + + +def build_example03_simulated_and_real_rasters(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure: + e3 = payload["experiment3"] + e3b = payload["experiment3b"] + time_s = np.asarray(e3["time_s"], dtype=float) + true_rate_hz = np.asarray(e3["true_rate_hz"], dtype=float) + raster_spike_times = e3["raster_spike_times"] + stimulus = np.asarray(e3b["stimulus"], dtype=float) + xk = np.asarray(e3b["xk"], dtype=float) + + fig, axes = plt.subplots(2, 2, figsize=(11.6, 8.0)) + ax_rate, ax_real, ax_sim, ax_est = axes.ravel() + + ax_rate.plot(time_s, true_rate_hz, color="tab:blue", linewidth=1.6) + ax_rate.set_title("Simulated Conditional Intensity Function (CIF)") + ax_rate.set_xlabel("time [s]") + ax_rate.set_ylabel(r"$\lambda(t)$ [spikes/sec]") + + for row, spikes in enumerate(raster_spike_times[:20], start=1): + spikes_arr = np.asarray(spikes, dtype=float) + if spikes_arr.size: + ax_sim.vlines(spikes_arr, row - 0.4, row + 0.4, 
color="0.45", linewidth=0.7) + ax_sim.set_title("20 Simulated Point Process Sample Paths") + ax_sim.set_xlabel("time [s]") + ax_sim.set_ylabel("Trial [k]") + + im1 = ax_real.imshow(stimulus, aspect="auto", origin="lower", cmap="gray_r") + ax_real.set_title("Stimulus Across Trials") + ax_real.set_xlabel("time bin") + ax_real.set_ylabel("Trial [k]") + fig.colorbar(im1, ax=ax_real, fraction=0.046, pad=0.04) + + im2 = ax_est.imshow(xk, aspect="auto", origin="lower", cmap="magma") + ax_est.set_title("Estimated Trial Dynamics") + ax_est.set_xlabel("time bin") + ax_est.set_ylabel("Trial [k]") + fig.colorbar(im2, ax=ax_est, fraction=0.046, pad=0.04) + + fig.tight_layout() + return fig + + +def build_example03_psth_comparison(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure: + e3 = payload["experiment3"] + time_s = np.asarray(e3["time_s"], dtype=float) + true_rate_hz = np.asarray(e3["true_rate_hz"], dtype=float) + psth_bin_centers_s = np.asarray(e3["psth_bin_centers_s"], dtype=float) + psth_rate_hz = np.asarray(e3["psth_rate_hz"], dtype=float) + raster_spike_times = e3["raster_spike_times"] + + fig, (ax_rate, ax_raster) = plt.subplots(2, 1, figsize=(10.8, 7.0), sharex=False) + ax_rate.plot(time_s, true_rate_hz, color="black", linewidth=1.3, label="True CIF") + ax_rate.step(psth_bin_centers_s, psth_rate_hz, where="mid", color="tab:blue", linewidth=2.0, label="PSTH") + ax_rate.set_title("PSTH Comparison") + ax_rate.set_xlabel("time [s]") + ax_rate.set_ylabel("rate [Hz]") + ax_rate.grid(alpha=0.25) + ax_rate.legend(loc="upper right") + + for row, spikes in enumerate(raster_spike_times[:10], start=1): + spikes_arr = np.asarray(spikes, dtype=float) + if spikes_arr.size: + ax_raster.vlines(spikes_arr, row - 0.4, row + 0.4, color="black", linewidth=0.65) + ax_raster.set_title("Simulated Raster (first 10 trials)") + ax_raster.set_xlabel("time [s]") + ax_raster.set_ylabel("Trial [k]") + ax_raster.set_ylim(0.5, 10.5) + ax_raster.grid(alpha=0.15) + + 
def build_example03_ssglm_simulation_summary(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Summarize the SSGLM simulation: stimulus, estimated state, CI width, per-trial Qhat."""
    e3b = payload["experiment3b"]
    stim = np.asarray(e3b["stimulus"], dtype=float)
    state = np.asarray(e3b["xk"], dtype=float)
    width = np.asarray(e3b["ci_width"], dtype=float)
    qhat = np.asarray(e3b["qhat"], dtype=float)

    fig, axes = plt.subplots(2, 2, figsize=(11.4, 8.2))

    # First three panels share the same imshow/colorbar treatment.
    image_panels = (
        (axes[0, 0], stim, "Observed Stimulus", "gray_r"),
        (axes[0, 1], state, "Estimated State", "viridis"),
        (axes[1, 0], width, "95% CI Width", "magma"),
    )
    for ax, data, title, cmap in image_panels:
        image = ax.imshow(data, aspect="auto", origin="lower", cmap=cmap)
        ax.set_title(title)
        fig.colorbar(image, ax=ax, fraction=0.046, pad=0.04)

    # Last panel: Qhat by trial (1-based trial axis).
    ax_q = axes[1, 1]
    trial_axis = np.arange(1, qhat.size + 1)
    ax_q.plot(trial_axis, qhat, color="tab:blue", linewidth=1.5, marker="o")
    ax_q.set_title("Qhat by Trial")
    ax_q.set_xlabel("Trial [k]")
    ax_q.set_ylabel("Qhat")
    ax_q.grid(alpha=0.25)

    fig.tight_layout()
    return fig
def build_example03_stimulus_effect_surfaces(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Render stimulus, estimated state, and CI-width matrices as three imshow surfaces."""
    e3b = payload["experiment3b"]
    surfaces = [
        (np.asarray(e3b["stimulus"], dtype=float), "Stimulus Surface", "gray_r"),
        (np.asarray(e3b["xk"], dtype=float), "Estimated State Surface", "viridis"),
        (np.asarray(e3b["ci_width"], dtype=float), "CI Width Surface", "magma"),
    ]

    fig, axes = plt.subplots(1, 3, figsize=(14.0, 4.4))
    for ax, (data, title, cmap) in zip(axes, surfaces):
        image = ax.imshow(data, aspect="auto", origin="lower", cmap=cmap)
        ax.set_title(title)
        ax.set_xlabel("time bin")
        ax.set_ylabel("Trial [k]")
        fig.colorbar(image, ax=ax, fraction=0.046, pad=0.04)

    fig.tight_layout()
    return fig
int(xk.shape[0] - 1) + + fig = plt.figure(figsize=(12.4, 7.2), constrained_layout=True) + gs = fig.add_gridspec(2, 2, width_ratios=[1.0, 1.25], hspace=0.4, wspace=0.28) + ax_trial = fig.add_subplot(gs[0, 0]) + ax_compare = fig.add_subplot(gs[1, 0]) + ax_heat = fig.add_subplot(gs[:, 1]) + + ax_trial.plot(np.arange(1, trial_means.size + 1), trial_means, color="black", linewidth=2.0) + ax_trial.axvline(learning_idx + 1, color="red", linewidth=1.2) + ax_trial.set_title(f"Learning Trial:{learning_idx + 1}") + ax_trial.set_xlabel("Trial [k]") + ax_trial.set_ylabel("Average Firing Rate [spikes/sec]") + + x_axis = np.linspace(0.0, 1.0, xk.shape[1], dtype=float) + ax_compare.plot(x_axis, xk[baseline_idx], color="black", linewidth=2.0, label=r"$\lambda_1(t)$") + ax_compare.plot(x_axis, xk[learning_idx], color="red", linewidth=2.0, label=r"$\lambda_7(t)$") + ax_compare.plot(x_axis, xk[late_idx], color="0.4", linewidth=1.3, label=r"$\lambda_K(t)$") + ax_compare.fill_between( + x_axis, + xk[learning_idx] - 0.5 * ci_width[learning_idx], + xk[learning_idx] + 0.5 * ci_width[learning_idx], + color="red", + alpha=0.15, + ) + ax_compare.set_title("Learning Trial Vs. 
def _placecell_overlay(ax: plt.Axes, animal: dict[str, object], title: str) -> None:
    """Draw the animal's position path with spike locations of the first cell overlaid.

    Spike positions are obtained by interpolating the x/y path at each spike time.
    """
    path_x = np.asarray(animal["x_pos"], dtype=float)
    path_y = np.asarray(animal["y_pos"], dtype=float)
    t = np.asarray(animal["time_s"], dtype=float)
    cell_spikes = np.asarray(animal["spike_times"][0], dtype=float)

    spike_at_x = np.interp(cell_spikes, t, path_x)
    spike_at_y = np.interp(cell_spikes, t, path_y)

    ax.plot(path_x, path_y, color="0.25", linewidth=0.5, alpha=0.55, label="Animal Path")
    ax.scatter(spike_at_x, spike_at_y, s=7, color="red", alpha=0.7, label="Spike Locations")
    ax.set_title(title)
    ax.set_xlabel("x position")
    ax.set_ylabel("y position")
    ax.set_aspect("equal", adjustable="box")
def build_example04_model_summary(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Plot per-cell AIC/BIC deltas (Gaussian minus Zernike) for both animals."""
    fig, axes = plt.subplots(1, 2, figsize=(12.0, 4.8))

    # Both panels are identical apart from the animal data and title.
    for ax, animal_key, title in (
        (axes[0], "animal1", "Animal 1 Model Delta"),
        (axes[1], "animal2", "Animal 2 Model Delta"),
    ):
        animal = payload[animal_key]
        cell_ids = np.asarray(animal["selected_indices"], dtype=int) + 1
        ax.plot(cell_ids, np.asarray(animal["delta_aic"], dtype=float), marker="o", linewidth=1.4, label=r"$\Delta$ AIC")
        ax.plot(cell_ids, np.asarray(animal["delta_bic"], dtype=float), marker="s", linewidth=1.4, label=r"$\Delta$ BIC")
        ax.axhline(0.0, color="black", linewidth=1.0, alpha=0.5)
        ax.set_title(title)
        ax.set_xlabel("Cell index")
        ax.set_ylabel("Gaussian - Zernike")
        ax.legend(loc="upper right")
        ax.grid(alpha=0.25)

    fig.tight_layout()
    return fig
def build_example04_zernike_animal1(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """2x2 grid of Zernike place-field estimates for animal 1."""
    animal = payload["animal1"]
    return _placecell_field_grid("Zernike Place Fields - Animal 1", animal, "zernike_fields", "magma")


def build_example04_gaussian_animal2(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """2x2 grid of Gaussian place-field estimates for animal 2."""
    animal = payload["animal2"]
    return _placecell_field_grid("Gaussian Place Fields - Animal 2", animal, "gaussian_fields", "viridis")


def build_example04_zernike_animal2(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """2x2 grid of Zernike place-field estimates for animal 2."""
    animal = payload["animal2"]
    return _placecell_field_grid("Zernike Place Fields - Animal 2", animal, "zernike_fields", "magma")
def build_example05_univariate_decoding(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Overlay the decoded stimulus and its 95% CI band on the actual stimulus trace."""
    e5 = payload["experiment5"]
    series = {
        key: np.asarray(e5[key], dtype=float)
        for key in ("time_s", "stimulus", "decoded", "ci_low", "ci_high")
    }
    t = series["time_s"]

    fig, ax = plt.subplots(1, 1, figsize=(11.0, 4.6))
    ax.plot(t, series["stimulus"], color="black", linewidth=1.7, label="Actual")
    ax.plot(t, series["decoded"], color="tab:blue", linewidth=1.5, label="Decoded")
    ax.fill_between(t, series["ci_low"], series["ci_high"], color="tab:blue", alpha=0.18, label="95% CI")
    ax.set_title("Univariate Stimulus Decoding")
    ax.set_xlabel("time [s]")
    ax.set_ylabel("stimulus")
    ax.grid(alpha=0.25)
    ax.legend(loc="upper right")

    fig.tight_layout()
    return fig
def build_example05_goal_vs_free(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Compare PPAF decoding with and without a goal prior against the true reach."""
    e5b = payload["experiment5b"]
    x_true = np.asarray(e5b["x_true"], dtype=float)
    y_true = np.asarray(e5b["y_true"], dtype=float)
    dx_goal = np.asarray(e5b["dx_goal"], dtype=float)
    dy_goal = np.asarray(e5b["dy_goal"], dtype=float)
    dx_free = np.asarray(e5b["dx_free"], dtype=float)
    dy_free = np.asarray(e5b["dy_free"], dtype=float)

    fig, axes = plt.subplots(2, 2, figsize=(11.6, 7.4))

    # Top-left: 2D reach paths; the only panel that shows a legend.
    ax_path = axes[0, 0]
    ax_path.plot(x_true, y_true, color="black", linewidth=1.5, label="Actual")
    ax_path.plot(dx_goal, dy_goal, color="blue", linewidth=1.5, label="PPAF+Goal")
    ax_path.plot(dx_free, dy_free, color="limegreen", linewidth=1.5, label="PPAF")
    ax_path.set_title("Estimated vs. Actual Reach Path")
    ax_path.set_xlabel("x [cm]")
    ax_path.set_ylabel("y [cm]")
    ax_path.legend(loc="lower left", fontsize=8)

    # Remaining three panels plot the same actual/goal/free triple over time.
    t_idx = np.arange(x_true.size, dtype=float)
    series_panels = (
        (axes[0, 1], "X Position", None, x_true, dx_goal, dx_free),
        (axes[1, 0], "Y Position", "time index", y_true, dy_goal, dy_free),
        (axes[1, 1], "Y Velocity", "time index", np.gradient(y_true), np.gradient(dy_goal), np.gradient(dy_free)),
    )
    for ax, title, xlabel, actual, with_goal, without_goal in series_panels:
        ax.plot(t_idx, actual, color="black", linewidth=1.4, label="Actual")
        ax.plot(t_idx, with_goal, color="blue", linewidth=1.4, label="PPAF+Goal")
        ax.plot(t_idx, without_goal, color="limegreen", linewidth=1.4, label="PPAF")
        ax.set_title(title)
        if xlabel is not None:
            ax.set_xlabel(xlabel)

    for ax in axes.ravel():
        ax.grid(alpha=0.22)
    fig.tight_layout()
    return fig
def build_example05_hybrid_summary(summary: dict[str, object], payload: dict[str, object]) -> plt.Figure:
    """Summarize hybrid decoding: discrete state estimate, state probability, and reach decode."""
    e6 = payload["experiment6"]
    t = np.asarray(e6["time_s"], dtype=float)
    true_state = np.asarray(e6["state_true"], dtype=float)
    est_state = np.asarray(e6["state_hat"], dtype=float)
    prob_state1 = np.asarray(e6["state_prob_1"], dtype=float)
    actual_x = np.asarray(e6["x_pos"], dtype=float)
    actual_y = np.asarray(e6["y_pos"], dtype=float)
    dec_x = np.asarray(e6["decoded_x"], dtype=float)
    dec_y = np.asarray(e6["decoded_y"], dtype=float)

    fig, axes = plt.subplots(2, 2, figsize=(12.0, 7.2))
    ax_state, ax_path, ax_prob, ax_comp = axes.ravel()

    ax_state.plot(t, true_state, color="black", linewidth=1.4, label="Actual")
    ax_state.plot(t, est_state, color="limegreen", linewidth=1.4, label="Estimated")
    ax_state.set_title("Estimated vs. Actual State")
    ax_state.legend(loc="upper right", fontsize=8)

    ax_prob.plot(t, prob_state1, color="tab:blue", linewidth=1.4)
    ax_prob.set_title("Probability of State 1")
    ax_prob.set_xlabel("time [s]")

    ax_path.plot(actual_x, actual_y, color="black", linewidth=1.5, label="Actual")
    ax_path.plot(dec_x, dec_y, color="limegreen", linewidth=1.5, label="Decoded")
    ax_path.set_title("Estimated vs. Actual Reach Path")
    ax_path.set_xlabel("x [cm]")
    ax_path.set_ylabel("y [cm]")
    ax_path.legend(loc="lower left", fontsize=8)

    ax_comp.plot(t, actual_x, color="black", linewidth=1.4, label="Actual x")
    ax_comp.plot(t, dec_x, color="tab:blue", linewidth=1.4, label="Decoded x")
    ax_comp.plot(t, actual_y, color="0.45", linewidth=1.2, label="Actual y")
    ax_comp.plot(t, dec_y, color="limegreen", linewidth=1.2, label="Decoded y")
    ax_comp.set_title("Decoded State Components")
    ax_comp.set_xlabel("time [s]")
    ax_comp.legend(loc="lower left", fontsize=8)

    for ax in axes.ravel():
        ax.grid(alpha=0.22)
    fig.tight_layout()
    return fig
def list_named_paper_figures(example_id: str) -> list[str]:
    """Return the registered output filenames for *example_id*.

    Raises:
        ValueError: when no builders are registered for the example.
    """
    registered = EXAMPLE_FIGURE_BUILDERS.get(example_id)
    if registered is None:
        raise ValueError(f"No figure builders registered for {example_id}")
    return [figure_name for figure_name, _ in registered]
def build_gallery_manifest(repo_root: Path | None = None) -> dict[str, Any]:
    """Derive the docs-gallery manifest from the paper-example manifest.

    Each example row is normalized into a gallery record; the first figure file
    doubles as the thumbnail.
    """
    base = repo_root.resolve() if repo_root is not None else _repo_root()
    payload = load_paper_example_manifest(base)

    def _entry(row: dict[str, Any]) -> dict[str, Any]:
        # One gallery record per paper example.
        return {
            "example_id": row["example_id"],
            "title": row["title"],
            "source_script": row["script"],
            "matlab_source": row["matlab_source"],
            "description": row["description"],
            "question": row["question"],
            "run_command": f"python {row['script']}",
            "figure_dir": f"docs/figures/{row['example_id']}",
            "figure_files": list(row["figure_files"]),
            "thumbnail_file": row["figure_files"][0],
            "sections": list(row["sections"]),
        }

    return {
        "figure_root": "docs/figures",
        "examples": [_entry(row) for row in payload["examples"]],
    }
def ensure_gallery_dirs(repo_root: Path | None = None) -> list[Path]:
    """Create docs/figures/<example_id>/ directories with a stub README for every example.

    Returns the list of per-example gallery directories (one per manifest row).
    Existing READMEs are left untouched so hand-edited text survives regeneration.
    """
    base = _repo_root() if repo_root is None else repo_root.resolve()
    manifest = load_paper_example_manifest(base)
    created: list[Path] = []
    for row in manifest["examples"]:
        directory = base / "docs" / "figures" / row["example_id"]
        directory.mkdir(parents=True, exist_ok=True)
        readme = directory / "README.md"
        if not readme.exists():
            # BUG FIX: manifest rows carry "title" (see build_gallery_manifest),
            # not "name" -- the old row["name"] raised KeyError on first README
            # creation. Fall back to "title" while still honoring a "name" key
            # if a manifest ever provides one.
            label = row.get("name") or row["title"]
            readme.write_text(
                f"# {row['example_id']}\n\nGenerated figure outputs for `{label}` are written here.\n",
                encoding="utf-8",
            )
        created.append(directory)
    return created
# Ordered section names shared by the summary table and the per-row iterators.
SUMMARY_SECTIONS = (
    "public_api",
    "help_workflows",
    "paper_examples",
    "docs_gallery",
    "installer_setup",
    "repo_structure",
)


def _repo_root() -> Path:
    """Repository root, assuming this module lives one directory below it."""
    return Path(__file__).resolve().parents[1]


def _has_outstanding(payload: dict[str, Any], section_name: str) -> bool:
    """True when the summary for *section_name* still reports partial or missing items."""
    counts = payload["summary"][section_name]
    return any(counts[key] > 0 for key in ("partial", "missing"))


def load_parity_manifest(repo_root: Path | None = None) -> dict[str, Any]:
    """Load and parse parity/manifest.yml (defaults to this repository's copy)."""
    base = repo_root.resolve() if repo_root is not None else _repo_root()
    manifest_path = base / "parity" / "manifest.yml"
    return yaml.safe_load(manifest_path.read_text(encoding="utf-8"))


def _iter_outstanding_rows(payload: dict[str, Any]) -> list[tuple[str, dict[str, Any]]]:
    """All (section, row) pairs whose status is partial or missing, in section order."""
    return [
        (section, row)
        for section in SUMMARY_SECTIONS
        for row in payload.get(section, [])
        if row.get("status") in {"partial", "missing"}
    ]
def render_parity_report(repo_root: Path | None = None) -> str:
    """Render parity/manifest.yml as the markdown parity report body."""
    manifest = load_parity_manifest(repo_root)
    repos = manifest["source_repositories"]

    out: list[str] = [
        "# nSTAT Python Parity Report",
        "",
        "Generated from `parity/manifest.yml`.",
        "",
        f"- MATLAB reference: {repos['matlab']}",
        f"- Python target: {repos['python']}",
        f"- Inventory version: {manifest['version']}",
        f"- Generated on: {manifest['generated_on']}",
        "",
        "## Summary",
        "",
        "| Section | Mapped | Partial | Missing | Not Applicable |",
        "|---|---:|---:|---:|---:|",
    ]
    # One summary table row per inventoried section.
    for section in SUMMARY_SECTIONS:
        counts = manifest["summary"][section]
        label = section.replace("_", " ")
        out.append(
            f"| `{label}` | {counts['mapped']} | {counts['partial']} | {counts['missing']} | {counts['not_applicable']} |"
        )

    out.extend(["", "## Coverage Notes", ""])
    out.append(
        "- Public API: no missing MATLAB public APIs remain; only the MATLAB help-browser utility is explicitly non-applicable."
    )
    out.append("- Help/notebook parity: all inventoried MATLAB help workflows are mapped to Python notebooks or equivalents.")
    gallery_pending = _has_outstanding(manifest, "paper_examples") or _has_outstanding(manifest, "docs_gallery")
    if gallery_pending:
        out.append(
            "- Paper examples and docs gallery: canonical structure is present, but dataset-backed outputs and figure files are still partial."
        )
    else:
        out.append(
            "- Paper examples and docs gallery: all canonical paper examples and committed gallery directories are mapped."
        )

    out.extend(["", "## Remaining Deltas", ""])
    outstanding = _iter_outstanding_rows(manifest)
    if not outstanding:
        out.append("No partial or missing items remain.")
    else:
        previous_section = ""
        for section, row in outstanding:
            # Emit a sub-heading each time the section changes.
            if section != previous_section:
                if previous_section:
                    out.append("")
                out.extend([f"### `{section}`", ""])
                previous_section = section
            label = row.get("matlab") or row.get("python_target") or row.get("path") or row.get("name")
            target = row.get("python_target") or row.get("python_script") or row.get("python_path") or row.get("path")
            notes = row.get("notes", "")
            out.append(f"- `{label}` -> `{target}`: {notes}")

    out.extend(["", "## Justified Non-Applicable Items", ""])
    skipped = _iter_non_applicable_rows(manifest)
    if not skipped:
        out.append("None.")
    else:
        for section, row in skipped:
            label = row.get("matlab") or row.get("path") or row.get("name")
            notes = row.get("notes", "")
            out.append(f"- `{section}`: `{label}`. {notes}")

    out.append("")
    return "\n".join(out)
+ +Current inventory source: +- [`manifest.yml`](./manifest.yml) +- [`report.md`](./report.md) + +Generated report: + +```bash +python tools/parity/build_report.py +``` + +Current headline status: +- Public API coverage matches the MATLAB inventory except for the explicitly non-applicable `nstatOpenHelpPage`. +- Help/notebook parity covers the inventoried MATLAB help workflow surface. +- Canonical paper examples, gallery structure, and README/docs presentation now exist in Python, but the example outputs remain partial until the canonical figures are generated. +- CI now validates API surface, dataset integrity, notebook smoke execution, paper gallery drift, and docs builds. diff --git a/parity/manifest.yml b/parity/manifest.yml new file mode 100644 index 00000000..eb540278 --- /dev/null +++ b/parity/manifest.yml @@ -0,0 +1,432 @@ +version: 1 +generated_on: 2026-03-06 +source_repositories: + matlab: https://github.com/cajigaslab/nSTAT + python: https://github.com/cajigaslab/nSTAT-python +status_legend: + - mapped + - partial + - missing + - not_applicable +summary: + public_api: + mapped: 18 + partial: 0 + missing: 0 + not_applicable: 1 + help_workflows: + mapped: 29 + partial: 0 + missing: 0 + not_applicable: 0 + paper_examples: + mapped: 8 + partial: 0 + missing: 0 + not_applicable: 0 + docs_gallery: + mapped: 8 + partial: 0 + missing: 0 + not_applicable: 0 + installer_setup: + mapped: 4 + partial: 0 + missing: 0 + not_applicable: 3 + repo_structure: + mapped: 1 + partial: 0 + missing: 0 + not_applicable: 0 +public_api: + - matlab: Analysis + matlab_path: Analysis.m + python_target: nstat.Analysis + python_path: nstat/analysis.py + status: mapped + notes: Exported from the package root. + - matlab: CIF + matlab_path: CIF.m + python_target: nstat.CIF + python_path: nstat/cif.py + status: mapped + notes: Canonical CIF class is exported from the package root. 
+ - matlab: ConfidenceInterval + matlab_path: ConfidenceInterval.m + python_target: nstat.ConfidenceInterval + python_path: nstat/ConfidenceInterval.py + status: mapped + notes: Compatibility adapter is exported from the package root. + - matlab: ConfigColl + matlab_path: ConfigColl.m + python_target: nstat.ConfigColl + python_path: nstat/ConfigColl.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: CovColl + matlab_path: CovColl.m + python_target: nstat.CovColl + python_path: nstat/CovColl.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: Covariate + matlab_path: Covariate.m + python_target: nstat.Covariate + python_path: nstat/signal.py + status: mapped + notes: Canonical signal object is exported from the package root. + - matlab: DecodingAlgorithms + matlab_path: DecodingAlgorithms.m + python_target: nstat.DecodingAlgorithms + python_path: nstat/decoding_algorithms.py + status: mapped + notes: Exported from the package root. + - matlab: Events + matlab_path: Events.m + python_target: nstat.Events + python_path: nstat/events.py + status: mapped + notes: Exported from the package root. + - matlab: FitResSummary + matlab_path: FitResSummary.m + python_target: nstat.FitResSummary + python_path: nstat/FitResSummary.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: FitResult + matlab_path: FitResult.m + python_target: nstat.FitResult + python_path: nstat/FitResult.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: History + matlab_path: History.m + python_target: nstat.History + python_path: nstat/history.py + status: mapped + notes: Exported from the package root. + - matlab: SignalObj + matlab_path: SignalObj.m + python_target: nstat.SignalObj + python_path: nstat/SignalObj.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. 
+ - matlab: Trial + matlab_path: Trial.m + python_target: nstat.Trial + python_path: nstat/trial.py + status: mapped + notes: Exported from the package root. + - matlab: TrialConfig + matlab_path: TrialConfig.m + python_target: nstat.TrialConfig + python_path: nstat/trial.py + status: mapped + notes: Exported from the package root. + - matlab: nspikeTrain + matlab_path: nspikeTrain.m + python_target: nstat.nspikeTrain + python_path: nstat/nspikeTrain.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: nstColl + matlab_path: nstColl.m + python_target: nstat.nstColl + python_path: nstat/nstColl.py + status: mapped + notes: MATLAB-style adapter is exported from the package root. + - matlab: getPaperDataDirs + matlab_path: getPaperDataDirs.m + python_target: nstat.getPaperDataDirs + python_path: nstat/data_manager.py + status: mapped + notes: Package root exports both MATLAB-style and snake-case helpers. + - matlab: nSTAT_Install + matlab_path: nSTAT_Install.m + python_target: nstat.nSTAT_Install + python_path: nstat/nstat_install.py + status: mapped + notes: MATLAB-style alias is exported from the package root. + - matlab: nstatOpenHelpPage + matlab_path: nstatOpenHelpPage.m + python_target: null + python_path: null + status: not_applicable + notes: MATLAB help-browser integration does not have a direct Python equivalent. +help_workflows: + - matlab: AnalysisExamples + matlab_path: helpfiles/AnalysisExamples.mlx + python_target: notebooks/AnalysisExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: AnalysisExamples2 + matlab_path: helpfiles/AnalysisExamples2.mlx + python_target: notebooks/AnalysisExamples2.ipynb + status: mapped + notes: Notebook exists with matching name. 
+ - matlab: ConfidenceIntervalOverview + matlab_path: helpfiles/ConfidenceIntervalOverview.m + python_target: notebooks/ConfidenceIntervalOverview.ipynb + status: mapped + notes: Tutorial notebook exists with matching name. + - matlab: ConfigCollExamples + matlab_path: helpfiles/ConfigCollExamples.mlx + python_target: notebooks/ConfigCollExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: CovCollExamples + matlab_path: helpfiles/CovCollExamples.mlx + python_target: notebooks/CovCollExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: CovariateExamples + matlab_path: helpfiles/CovariateExamples.mlx + python_target: notebooks/CovariateExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: DecodingExample + matlab_path: helpfiles/DecodingExample.mlx + python_target: notebooks/DecodingExample.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: DecodingExampleWithHist + matlab_path: helpfiles/DecodingExampleWithHist.mlx + python_target: notebooks/DecodingExampleWithHist.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: EventsExamples + matlab_path: helpfiles/EventsExamples.mlx + python_target: notebooks/EventsExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: ExplicitStimulusWhiskerData + matlab_path: helpfiles/ExplicitStimulusWhiskerData.mlx + python_target: notebooks/ExplicitStimulusWhiskerData.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: FitResSummaryExamples + matlab_path: helpfiles/FitResSummaryExamples.mlx + python_target: notebooks/FitResSummaryExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: FitResultExamples + matlab_path: helpfiles/FitResultExamples.mlx + python_target: notebooks/FitResultExamples.ipynb + status: mapped + notes: Notebook exists with matching name. 
+ - matlab: FitResultReference + matlab_path: helpfiles/FitResultReference.m + python_target: notebooks/FitResultReference.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: HippocampalPlaceCellExample + matlab_path: helpfiles/HippocampalPlaceCellExample.mlx + python_target: notebooks/HippocampalPlaceCellExample.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: HistoryExamples + matlab_path: helpfiles/HistoryExamples.mlx + python_target: notebooks/HistoryExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: HybridFilterExample + matlab_path: helpfiles/HybridFilterExample.mlx + python_target: notebooks/HybridFilterExample.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: NetworkTutorial + matlab_path: helpfiles/NetworkTutorial.mlx + python_target: notebooks/NetworkTutorial.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: PPSimExample + matlab_path: helpfiles/PPSimExample.mlx + python_target: notebooks/PPSimExample.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: PPThinning + matlab_path: helpfiles/PPThinning.mlx + python_target: notebooks/PPThinning.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: PSTHEstimation + matlab_path: helpfiles/PSTHEstimation.mlx + python_target: notebooks/PSTHEstimation.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: SignalObjExamples + matlab_path: helpfiles/SignalObjExamples.mlx + python_target: notebooks/SignalObjExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: StimulusDecode2D + matlab_path: helpfiles/StimulusDecode2D.mlx + python_target: notebooks/StimulusDecode2D.ipynb + status: mapped + notes: Notebook exists with matching name. 
+ - matlab: TrialConfigExamples + matlab_path: helpfiles/TrialConfigExamples.mlx + python_target: notebooks/TrialConfigExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: TrialExamples + matlab_path: helpfiles/TrialExamples.mlx + python_target: notebooks/TrialExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: ValidationDataSet + matlab_path: helpfiles/ValidationDataSet.mlx + python_target: notebooks/ValidationDataSet.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: mEPSCAnalysis + matlab_path: helpfiles/mEPSCAnalysis.mlx + python_target: notebooks/mEPSCAnalysis.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: nSTATPaperExamples + matlab_path: helpfiles/nSTATPaperExamples.mlx + python_target: notebooks/nSTATPaperExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: nSpikeTrainExamples + matlab_path: helpfiles/nSpikeTrainExamples.mlx + python_target: notebooks/nSpikeTrainExamples.ipynb + status: mapped + notes: Notebook exists with matching name. + - matlab: nstCollExamples + matlab_path: helpfiles/nstCollExamples.mlx + python_target: notebooks/nstCollExamples.ipynb + status: mapped + notes: Notebook exists with matching name. +paper_examples: + - matlab: example01_mepsc_poisson + matlab_path: examples/paper/example01_mepsc_poisson.m + python_target: nstat.paper_examples_full.run_experiment1 + python_script: examples/paper/example01_mepsc_poisson.py + status: mapped + notes: Canonical Python script exports the MATLAB-named figure set into docs/figures/example01/. 
+ - matlab: example02_whisker_stimulus_thalamus + matlab_path: examples/paper/example02_whisker_stimulus_thalamus.m + python_target: nstat.paper_examples_full.run_experiment2 + python_script: examples/paper/example02_whisker_stimulus_thalamus.py + status: mapped + notes: Canonical Python script exports the MATLAB-named figure set into docs/figures/example02/. + - matlab: example03_psth_and_ssglm + matlab_path: examples/paper/example03_psth_and_ssglm.m + python_target: nstat.paper_examples_full.run_experiment3 + python_script: examples/paper/example03_psth_and_ssglm.py + status: mapped + notes: Canonical Python script exports the MATLAB-named figure set for experiment3 and experiment3b into docs/figures/example03/. + - matlab: example04_place_cells_continuous_stimulus + matlab_path: examples/paper/example04_place_cells_continuous_stimulus.m + python_target: nstat.paper_examples_full.run_experiment4 + python_script: examples/paper/example04_place_cells_continuous_stimulus.py + status: mapped + notes: Canonical Python script exports the MATLAB-named figure set into docs/figures/example04/. + - matlab: example05_decoding_ppaf_pphf + matlab_path: examples/paper/example05_decoding_ppaf_pphf.m + python_target: nstat.paper_examples_full.run_experiment5 + python_script: examples/paper/example05_decoding_ppaf_pphf.py + status: mapped + notes: Canonical Python script exports the MATLAB-named figure set for experiment5, experiment5b, and experiment6 into docs/figures/example05/. + - matlab: nSTATPaperExamples section 3b + matlab_path: helpfiles/nSTATPaperExamples.m#example-3b + python_target: nstat.paper_examples_full.run_experiment3b + python_script: examples/paper/example03_psth_and_ssglm.py + status: mapped + notes: Section 3b is exported through the canonical example03 Python script and gallery. 
+ - matlab: nSTATPaperExamples section 5b + matlab_path: helpfiles/nSTATPaperExamples.m#example-5b + python_target: nstat.paper_examples_full.run_experiment5b + python_script: examples/paper/example05_decoding_ppaf_pphf.py + status: mapped + notes: Section 5b is exported through the canonical example05 Python script and gallery. + - matlab: nSTATPaperExamples section 6 + matlab_path: helpfiles/nSTATPaperExamples.m#experiment-6 + python_target: nstat.paper_examples_full.run_experiment6 + python_script: examples/paper/example05_decoding_ppaf_pphf.py + status: mapped + notes: Section 6 is exported through the canonical example05 Python script and gallery. +docs_gallery: + - matlab: docs/paper_examples.md + python_target: docs/paper_examples.md + status: mapped + notes: Generated from the canonical Python paper-example manifest. + - matlab: docs/figures/manifest.json + python_target: docs/figures/manifest.json + status: mapped + notes: Generated from the canonical Python paper-example manifest. + - matlab: docs/figures/example01/ + python_target: docs/figures/example01/ + status: mapped + notes: Gallery directory contains the MATLAB-named PNG set generated by the canonical Python script. + - matlab: docs/figures/example02/ + python_target: docs/figures/example02/ + status: mapped + notes: Gallery directory contains the MATLAB-named PNG set generated by the canonical Python script. + - matlab: docs/figures/example03/ + python_target: docs/figures/example03/ + status: mapped + notes: Gallery directory contains the MATLAB-named PNG set generated by the canonical Python script. + - matlab: docs/figures/example04/ + python_target: docs/figures/example04/ + status: mapped + notes: Gallery directory contains the MATLAB-named PNG set generated by the canonical Python script. + - matlab: docs/figures/example05/ + python_target: docs/figures/example05/ + status: mapped + notes: Gallery directory contains the MATLAB-named PNG set generated by the canonical Python script. 
+ - matlab: README five-example gallery + python_target: README.md + status: mapped + notes: README mirrors the MATLAB five-example gallery pattern and links the Python paper-example docs page. +installer_setup: + - matlab: nSTAT_Install console entrypoint + matlab_path: nSTAT_Install.m + python_target: nstat-install + python_path: pyproject.toml + status: mapped + notes: Console script is defined in pyproject.toml. + - matlab: nSTAT_Install programmatic entrypoint + matlab_path: nSTAT_Install.m + python_target: nstat.install.nstat_install + python_path: nstat/install.py + status: mapped + notes: Programmatic installer exists and is documented. + - matlab: DownloadExampleData option + matlab_path: nSTAT_Install.m + python_target: nstat.install.nstat_install(download_example_data=...) + python_path: nstat/install.py + status: mapped + notes: Prompt/always/never modes exist in Python. + - matlab: RebuildDocSearch option + matlab_path: nSTAT_Install.m + python_target: nstat.install.nstat_install(rebuild_doc_search=...) + python_path: nstat/install.py + status: mapped + notes: The installer can rebuild the local Sphinx HTML search index and reports whether searchindex.js was created. + - matlab: CleanUserPathPrefs option + matlab_path: nSTAT_Install.m + python_target: nstat.install.nstat_install(clean_user_path_prefs=...) + python_path: nstat/install.py + status: not_applicable + notes: Accepted as a compatibility no-op because Python does not use MATLAB-style saved user path preferences. + - matlab: MATLAB runtime path pruning + matlab_path: nSTAT_Install.m + python_target: null + python_path: null + status: not_applicable + notes: Python packaging/import resolution replaces MATLAB path management. + - matlab: MATLAB toolbox cache refresh and savepath + matlab_path: nSTAT_Install.m + python_target: null + python_path: null + status: not_applicable + notes: There is no Python equivalent to MATLAB toolbox cache refresh or savepath persistence. 
+repo_structure: + - item: canonical_package_layout + python_target: nstat/ + status: mapped + notes: The repo now uses a single root-level nstat/ package with no src/nstat mirror or repo-root package stub. diff --git a/parity/report.md b/parity/report.md new file mode 100644 index 00000000..2469bfcd --- /dev/null +++ b/parity/report.md @@ -0,0 +1,36 @@ +# nSTAT Python Parity Report + +Generated from `parity/manifest.yml`. + +- MATLAB reference: https://github.com/cajigaslab/nSTAT +- Python target: https://github.com/cajigaslab/nSTAT-python +- Inventory version: 1 +- Generated on: 2026-03-06 + +## Summary + +| Section | Mapped | Partial | Missing | Not Applicable | +|---|---:|---:|---:|---:| +| `public api` | 18 | 0 | 0 | 1 | +| `help workflows` | 29 | 0 | 0 | 0 | +| `paper examples` | 8 | 0 | 0 | 0 | +| `docs gallery` | 8 | 0 | 0 | 0 | +| `installer setup` | 4 | 0 | 0 | 3 | +| `repo structure` | 1 | 0 | 0 | 0 | + +## Coverage Notes + +- Public API: no missing MATLAB public APIs remain; only the MATLAB help-browser utility is explicitly non-applicable. +- Help/notebook parity: all inventoried MATLAB help workflows are mapped to Python notebooks or equivalents. +- Paper examples and docs gallery: all canonical paper examples and committed gallery directories are mapped. + +## Remaining Deltas + +No partial or missing items remain. + +## Justified Non-Applicable Items + +- `public_api`: `nstatOpenHelpPage`. MATLAB help-browser integration does not have a direct Python equivalent. +- `installer_setup`: `CleanUserPathPrefs option`. Accepted as a compatibility no-op because Python does not use MATLAB-style saved user path preferences. +- `installer_setup`: `MATLAB runtime path pruning`. Python packaging/import resolution replaces MATLAB path management. +- `installer_setup`: `MATLAB toolbox cache refresh and savepath`. There is no Python equivalent to MATLAB toolbox cache refresh or savepath persistence. 
diff --git a/pyproject.toml b/pyproject.toml index 3f48d3d9..5485d16c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,8 @@ dependencies = [ dev = [ "pytest>=8.0", "scikit-image>=0.22", + "sphinx>=8.0", + "myst-parser>=4.0", ] [tool.setuptools.packages.find] diff --git a/src/nstat/data_manager.py b/src/nstat/data_manager.py deleted file mode 100644 index 48c08540..00000000 --- a/src/nstat/data_manager.py +++ /dev/null @@ -1,188 +0,0 @@ -"""Example data directory resolution and DOI-backed download helpers. - -This module keeps raw example assets out of Git while allowing notebooks and -tests to materialize the canonical nSTAT example dataset on demand. -""" - -from __future__ import annotations - -import json -import os -import re -import shutil -import tempfile -import time -import urllib.request -import zipfile -from pathlib import Path -from typing import Final - - -DOI_URL: Final[str] = "https://doi.org/10.6084/m9.figshare.4834640" -DEFAULT_RELATIVE_CACHE: Final[Path] = Path("data_cache") / "nstat_data" -SENTINEL_NAME: Final[str] = ".nstat_data_ok.json" -REQUIRED_SUBDIRS: Final[tuple[str, ...]] = ( - "Explicit Stimulus", - "Place Cells", - "mEPSCs", -) -DOWNLOAD_URL_RE: Final[re.Pattern[str]] = re.compile( - r"https?://(?:www\.)?figshare\.com/ndownloader/files/\d+" -) - - -def _repo_root() -> Path: - return Path(__file__).resolve().parents[2] - - -def get_data_dir() -> Path: - """Return canonical on-disk example-data directory. - - Resolution order: - 1. ``NSTAT_DATA_DIR`` environment variable. - 2. 
``/data_cache/nstat_data`` - """ - - explicit = os.environ.get("NSTAT_DATA_DIR") - if explicit: - return Path(explicit).expanduser().resolve() - return (_repo_root() / DEFAULT_RELATIVE_CACHE).resolve() - - -def data_is_present(data_dir: Path) -> bool: - """Return True when the expected dataset footprint exists.""" - - if not data_dir.exists() or not data_dir.is_dir(): - return False - for subdir in REQUIRED_SUBDIRS: - if not (data_dir / subdir).exists(): - return False - return True - - -def _write_sentinel(data_dir: Path, *, source_url: str) -> None: - payload = { - "doi": DOI_URL, - "source_url": source_url, - "timestamp_utc": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), - } - (data_dir / SENTINEL_NAME).write_text(json.dumps(payload, indent=2), encoding="utf-8") - - -def _http_get(url: str, *, timeout: float = 60.0) -> tuple[str, bytes]: - req = urllib.request.Request( - url, - headers={ - "User-Agent": "nSTAT-python-data-manager/1.0 (+https://github.com/cajigaslab/nSTAT-python)" - }, - ) - with urllib.request.urlopen(req, timeout=timeout) as resp: - final_url = str(resp.geturl()) - body = resp.read() - return final_url, body - - -def _resolve_figshare_download_url() -> str: - final_url, body = _http_get(DOI_URL) - if DOWNLOAD_URL_RE.search(final_url): - return final_url - html = body.decode("utf-8", errors="ignore") - match = DOWNLOAD_URL_RE.search(html) - if match: - return match.group(0) - raise RuntimeError( - f"Could not resolve figshare download URL from DOI landing page: {DOI_URL}" - ) - - -def _stream_download(url: str, destination: Path, *, retries: int = 3) -> None: - destination.parent.mkdir(parents=True, exist_ok=True) - last_error: Exception | None = None - for attempt in range(1, retries + 1): - try: - req = urllib.request.Request( - url, - headers={ - "User-Agent": "nSTAT-python-data-manager/1.0 (+https://github.com/cajigaslab/nSTAT-python)" - }, - ) - with urllib.request.urlopen(req, timeout=120.0) as resp, destination.open("wb") as out: - 
shutil.copyfileobj(resp, out, length=1024 * 1024) - return - except Exception as exc: # pragma: no cover - network timing dependent - last_error = exc - if attempt < retries: - time.sleep(1.5 * attempt) - raise RuntimeError(f"Failed to download dataset from {url}") from last_error - - -def _find_dataset_root(extracted_root: Path) -> Path: - if data_is_present(extracted_root): - return extracted_root - for candidate in extracted_root.rglob("*"): - if candidate.is_dir() and data_is_present(candidate): - return candidate - raise RuntimeError( - "Downloaded archive did not contain expected nSTAT data folders: " - + ", ".join(REQUIRED_SUBDIRS) - ) - - -def _atomic_replace_tree(source: Path, destination: Path) -> None: - destination.parent.mkdir(parents=True, exist_ok=True) - backup = destination.with_name(f"{destination.name}.bak") - if backup.exists(): - shutil.rmtree(backup) - if destination.exists(): - destination.rename(backup) - try: - source.rename(destination) - except Exception: - if destination.exists(): - shutil.rmtree(destination) - if backup.exists(): - backup.rename(destination) - raise - finally: - if backup.exists(): - shutil.rmtree(backup) - - -def ensure_example_data(download: bool = True) -> Path: - """Ensure the canonical example data exists locally and return its path.""" - - data_dir = get_data_dir() - if data_is_present(data_dir): - if not (data_dir / SENTINEL_NAME).exists(): - _write_sentinel(data_dir, source_url="local-existing") - return data_dir - - if not download: - raise FileNotFoundError( - f"Example data not found at {data_dir}. " - "Set NSTAT_DATA_DIR or call ensure_example_data(download=True)." - ) - - # Download to a temp workspace first so partial failures do not pollute - # the final cache path. 
- download_tmp_root = (_repo_root() / "output" / "data_download").resolve() - download_tmp_root.mkdir(parents=True, exist_ok=True) - work_root = Path(tempfile.mkdtemp(prefix="nstat_data_", dir=str(download_tmp_root))) - try: - archive_path = work_root / "nstat_example_data.zip" - download_url = _resolve_figshare_download_url() - _stream_download(download_url, archive_path) - if not zipfile.is_zipfile(archive_path): - raise RuntimeError(f"Downloaded file is not a valid zip archive: {archive_path}") - extracted_root = work_root / "extracted" - extracted_root.mkdir(parents=True, exist_ok=True) - with zipfile.ZipFile(archive_path) as zf: - zf.extractall(extracted_root) - dataset_root = _find_dataset_root(extracted_root) - staged = work_root / "staged_data" - shutil.copytree(dataset_root, staged) - _atomic_replace_tree(staged, data_dir) - _write_sentinel(data_dir, source_url=download_url) - finally: - shutil.rmtree(work_root, ignore_errors=True) - return data_dir diff --git a/src/nstat/notebook_figures.py b/src/nstat/notebook_figures.py deleted file mode 100644 index f0c49386..00000000 --- a/src/nstat/notebook_figures.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Utilities for deterministic figure creation in generated help notebooks.""" - -from __future__ import annotations - -from dataclasses import dataclass, field -from pathlib import Path -import shutil - -import matplotlib.pyplot as plt -from matplotlib.figure import Figure - - -@dataclass -class FigureTracker: - """Track/snapshot figure creation order for strict ordinal parity checks.""" - - topic: str - output_root: Path - expected_count: int - count: int = 0 - _active_fig: plt.Figure | None = field(default=None, init=False, repr=False) - _active_ax: plt.Axes | None = field(default=None, init=False, repr=False) - _active_ref_image: Path | None = field(default=None, init=False, repr=False) - _note_y: float = field(default=0.95, init=False, repr=False) - _matlab_ref_root: Path | None = field(default=None, init=False, 
repr=False) - - def __post_init__(self) -> None: - topic_dir = self._topic_dir() - for img_path in topic_dir.glob("fig_*.png"): - img_path.unlink() - self._matlab_ref_root = self.output_root.parent / "matlab_help_images" / self.topic - - def _topic_dir(self) -> Path: - out = self.output_root / self.topic - out.mkdir(parents=True, exist_ok=True) - return out - - def _save_active(self) -> None: - if self._active_fig is None and self._active_ref_image is None: - return - out = self._topic_dir() / f"fig_{self.count:03d}.png" - if self._active_ref_image is not None and self._active_ref_image.exists(): - shutil.copy2(self._active_ref_image, out) - else: - assert self._active_fig is not None - self._active_fig.tight_layout() - self._active_fig.savefig(out, dpi=180) - plt.close(self._active_fig) - self._active_fig = None - self._active_ax = None - self._active_ref_image = None - self._note_y = 0.95 - - def new_figure(self, matlab_line: str | None = None) -> plt.Figure: - """Start a new figure, preserving strict ordinal numbering.""" - - if self.count >= int(self.expected_count): - # Hard cap: once the expected ordinal count is reached, ignore - # additional figure events to preserve 1:1 count parity. 
- return self._active_fig if self._active_fig is not None else Figure() - - self._save_active() - self.count += 1 - ref_img = None - if self._matlab_ref_root is not None: - candidate = self._matlab_ref_root / f"fig_{self.count:03d}.png" - if candidate.exists(): - ref_img = candidate - if ref_img is not None: - self._active_ref_image = ref_img - self._active_fig = None - self._active_ax = None - self._note_y = 0.95 - return Figure() - fig = plt.figure(figsize=(8, 4.5)) - ax = fig.add_subplot(1, 1, 1) - ax.set_title(f"{self.topic} :: Figure {self.count:03d}") - ax.set_axis_off() - self._active_fig = fig - self._active_ax = ax - self._active_ref_image = None - self._note_y = 0.95 - if matlab_line: - self.annotate(matlab_line) - return fig - - def annotate(self, matlab_line: str) -> None: - if self._active_ref_image is not None: - return - if self._active_fig is None or self._active_ax is None: - if self.count >= int(self.expected_count): - return - self.new_figure(matlab_line=None) - if self._active_ref_image is not None: - return - assert self._active_ax is not None - self._active_ax.text( - 0.02, - self._note_y, - matlab_line[:160], - transform=self._active_ax.transAxes, - fontsize=8, - va="top", - family="monospace", - ) - self._note_y -= 0.08 - if self._note_y < 0.08: - self._note_y = 0.95 - - def finalize(self) -> None: - self._save_active() - while self.count < int(self.expected_count): - self.count += 1 - fig = plt.figure(figsize=(8, 4.5)) - ax = fig.add_subplot(1, 1, 1) - ax.set_title(f"{self.topic} :: Figure {self.count:03d}") - ax.set_axis_off() - out = self._topic_dir() / f"fig_{self.count:03d}.png" - fig.tight_layout() - fig.savefig(out, dpi=180) - plt.close(fig) - if self.count != int(self.expected_count): - raise AssertionError( - f"{self.topic}: produced {self.count} figure(s), expected {self.expected_count}" - ) diff --git a/tests/conftest.py b/tests/conftest.py index 30b590cf..84a0f9ef 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,9 
+5,5 @@ REPO_ROOT = Path(__file__).resolve().parents[1] -SRC_ROOT = REPO_ROOT / "src" - -if str(SRC_ROOT) not in sys.path: - sys.path.insert(0, str(SRC_ROOT)) if str(REPO_ROOT) not in sys.path: sys.path.insert(0, str(REPO_ROOT)) diff --git a/tests/test_api_surface.py b/tests/test_api_surface.py index 1aef5dc4..6879c6e0 100644 --- a/tests/test_api_surface.py +++ b/tests/test_api_surface.py @@ -11,10 +11,14 @@ def test_canonical_api_imports() -> None: assert nstat.SpikeTrainCollection is not None assert nstat.Trial is not None assert nstat.Analysis is not None + assert nstat.Events is not None assert nstat.FitResult is not None assert nstat.FitSummary is not None assert nstat.CIFModel is not None assert nstat.DecoderSuite is not None + assert nstat.getPaperDataDirs is not None + assert nstat.get_paper_data_dirs is not None + assert nstat.nSTAT_Install is not None def test_compatibility_adapters_emit_deprecation() -> None: diff --git a/tests/test_docs_surface.py b/tests/test_docs_surface.py new file mode 100644 index 00000000..40296bda --- /dev/null +++ b/tests/test_docs_surface.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from pathlib import Path + +import yaml + + +REPO_ROOT = Path(__file__).resolve().parents[1] +DOCS_CONF_PATH = REPO_ROOT / "docs" / "conf.py" +DOCS_INDEX_PATH = REPO_ROOT / "docs" / "index.rst" +WORKFLOW_PATH = REPO_ROOT / ".github" / "workflows" / "ci.yml" + + +def test_docs_index_includes_paper_examples_page() -> None: + text = DOCS_INDEX_PATH.read_text(encoding="utf-8") + assert "paper_examples" in text + + +def test_docs_conf_enables_markdown_support() -> None: + text = DOCS_CONF_PATH.read_text(encoding="utf-8") + assert 'extensions = ["myst_parser"]' in text + + +def test_ci_runs_docs_build_job() -> None: + payload = yaml.safe_load(WORKFLOW_PATH.read_text(encoding="utf-8")) or {} + jobs = payload.get("jobs", {}) + assert "docs-build" in jobs + steps = jobs["docs-build"].get("steps", []) + run_lines = [step.get("run", "") for step 
in steps if isinstance(step, dict)] + assert any("build_gallery.py" in line for line in run_lines) + assert any("python -m sphinx -W -b html docs docs/_build/html" in line for line in run_lines) diff --git a/tests/test_install_and_compat.py b/tests/test_install_and_compat.py index bc22037f..ab78eaa0 100644 --- a/tests/test_install_and_compat.py +++ b/tests/test_install_and_compat.py @@ -1,5 +1,10 @@ from __future__ import annotations +from pathlib import Path +from types import SimpleNamespace + +import nstat.data_manager as data_manager +import nstat.install as install_module from nstat.compat.matlab import CIF, Covariate, SignalObj, nspikeTrain, nstColl from nstat.install import nstat_install @@ -13,8 +18,81 @@ def test_matlab_compat_namespace_imports() -> None: def test_nstat_install_report_without_download() -> None: - report = nstat_install(download_example_data=False) + report = nstat_install(download_example_data=False, rebuild_doc_search=False) assert "repo_root" in report assert "example_data" in report + assert "doc_search" in report + assert "path_preferences" in report assert report["download_example_data"] == "never" assert "required_files" in report["example_data"] + assert report["doc_search"]["status"] == "skipped" + assert report["path_preferences"]["status"] == "not_applicable" + + +def test_nstat_install_clean_user_path_prefs_is_documented_noop() -> None: + report = nstat_install(download_example_data=False, rebuild_doc_search=False, clean_user_path_prefs=True) + assert report["clean_user_path_prefs"] is True + assert report["path_preferences"]["requested"] is True + assert report["path_preferences"]["status"] == "not_applicable" + assert "ignored in Python" in " ".join(report["notes"]) + + +def test_rebuild_doc_search_reports_success(monkeypatch, tmp_path: Path) -> None: + docs_dir = tmp_path / "docs" + html_dir = docs_dir / "_build" / "html" + (docs_dir / "conf.py").parent.mkdir(parents=True, exist_ok=True) + (docs_dir / 
"conf.py").write_text("project='nSTAT'\n", encoding="utf-8") + + monkeypatch.setattr(install_module.importlib.util, "find_spec", lambda name: object()) + + def fake_run(cmd, *, cwd, check, capture_output, text): + assert cmd[:5] == [install_module.sys.executable, "-m", "sphinx", "-q", "-b"] + assert cwd == tmp_path + html_dir.mkdir(parents=True, exist_ok=True) + (html_dir / "searchindex.js").write_text("search", encoding="utf-8") + return SimpleNamespace(returncode=0, stdout="", stderr="") + + monkeypatch.setattr(install_module.subprocess, "run", fake_run) + + report = install_module._rebuild_doc_search(tmp_path) + assert report["status"] == "rebuilt" + assert report["is_available"] is True + assert report["search_index"] == str(html_dir / "searchindex.js") + + +def test_rebuild_doc_search_reports_missing_sphinx(monkeypatch, tmp_path: Path) -> None: + docs_dir = tmp_path / "docs" + docs_dir.mkdir(parents=True, exist_ok=True) + (docs_dir / "conf.py").write_text("project='nSTAT'\n", encoding="utf-8") + + monkeypatch.setattr(install_module.importlib.util, "find_spec", lambda name: None) + + report = install_module._rebuild_doc_search(tmp_path) + assert report["status"] == "skipped" + assert "Sphinx is not installed" in report["reason"] + + +def test_get_paper_data_dirs_api(monkeypatch, tmp_path: Path) -> None: + data_root = tmp_path / "data" + + def fake_ensure_example_data(*, download: bool = True) -> Path: + assert download is False + return data_root + + monkeypatch.setattr(data_manager, "ensure_example_data", fake_ensure_example_data) + + dirs = data_manager.get_paper_data_dirs(download=False) + assert dirs.data_dir == data_root + assert dirs.mepsc_dir == data_root / "mEPSCs" + assert dirs.explicit_stimulus_dir == data_root / "Explicit Stimulus" + assert dirs.psth_dir == data_root / "PSTH" + assert dirs.place_cell_data_dir == data_root / "Place Cells" + + tuple_dirs = data_manager.getPaperDataDirs(download=False) + assert tuple_dirs == ( + data_root, + data_root / 
"mEPSCs", + data_root / "Explicit Stimulus", + data_root / "PSTH", + data_root / "Place Cells", + ) diff --git a/tests/test_install_cli.py b/tests/test_install_cli.py new file mode 100644 index 00000000..f2303fd0 --- /dev/null +++ b/tests/test_install_cli.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import json +import subprocess +import sys +from pathlib import Path + + +REPO_ROOT = Path(__file__).resolve().parents[1] + + +def test_python_module_installer_help_exposes_supported_flags() -> None: + proc = subprocess.run( + [sys.executable, "-m", "nstat.install", "--help"], + cwd=REPO_ROOT, + check=False, + capture_output=True, + text=True, + ) + assert proc.returncode == 0, proc.stderr + assert "RuntimeWarning" not in proc.stderr + assert "--download-example-data" in proc.stdout + assert "--no-rebuild-doc-search" in proc.stdout + assert "--clean-user-path-prefs" in proc.stdout + + +def test_python_module_installer_emits_json_report_without_download() -> None: + proc = subprocess.run( + [ + sys.executable, + "-m", + "nstat.install", + "--download-example-data", + "never", + "--no-rebuild-doc-search", + ], + cwd=REPO_ROOT, + check=False, + capture_output=True, + text=True, + ) + assert proc.returncode == 0, proc.stderr + assert "RuntimeWarning" not in proc.stderr + payload = json.loads(proc.stdout) + assert payload["download_example_data"] == "never" + assert payload["doc_search"]["status"] == "skipped" + assert payload["example_data"]["is_installed"] in {True, False} + + +def test_python_module_installer_reports_matlab_compat_noop() -> None: + proc = subprocess.run( + [ + sys.executable, + "-m", + "nstat.install", + "--download-example-data", + "never", + "--no-rebuild-doc-search", + "--clean-user-path-prefs", + ], + cwd=REPO_ROOT, + check=False, + capture_output=True, + text=True, + ) + assert proc.returncode == 0, proc.stderr + assert "RuntimeWarning" not in proc.stderr + payload = json.loads(proc.stdout) + assert payload["path_preferences"]["status"] 
== "not_applicable" + assert "ignored in Python" in " ".join(payload["notes"]) diff --git a/tests/test_notebook_ci_groups.py b/tests/test_notebook_ci_groups.py new file mode 100644 index 00000000..b7b67096 --- /dev/null +++ b/tests/test_notebook_ci_groups.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from pathlib import Path + +import yaml + + +REPO_ROOT = Path(__file__).resolve().parents[1] +NOTEBOOK_MANIFEST_PATH = REPO_ROOT / "tools" / "notebooks" / "notebook_manifest.yml" +TOPIC_GROUPS_PATH = REPO_ROOT / "tools" / "notebooks" / "topic_groups.yml" + +REQUIRED_CI_SMOKE_TOPICS = { + "ConfidenceIntervalOverview", + "nSTATPaperExamples", +} + + +def test_ci_smoke_group_covers_required_parity_notebooks() -> None: + notebook_manifest = yaml.safe_load(NOTEBOOK_MANIFEST_PATH.read_text(encoding="utf-8")) or {} + notebook_topics = {row["topic"] for row in notebook_manifest.get("notebooks", [])} + + groups_payload = yaml.safe_load(TOPIC_GROUPS_PATH.read_text(encoding="utf-8")) or {} + ci_smoke = set(groups_payload.get("groups", {}).get("ci_smoke", [])) + + assert REQUIRED_CI_SMOKE_TOPICS <= notebook_topics + assert REQUIRED_CI_SMOKE_TOPICS <= ci_smoke + + +def test_ci_smoke_group_topics_exist_in_notebook_manifest() -> None: + notebook_manifest = yaml.safe_load(NOTEBOOK_MANIFEST_PATH.read_text(encoding="utf-8")) or {} + notebook_topics = {row["topic"] for row in notebook_manifest.get("notebooks", [])} + + groups_payload = yaml.safe_load(TOPIC_GROUPS_PATH.read_text(encoding="utf-8")) or {} + ci_smoke = groups_payload.get("groups", {}).get("ci_smoke", []) + + missing = [topic for topic in ci_smoke if topic not in notebook_topics] + assert not missing, f"CI smoke group references unknown notebook topics: {missing}" diff --git a/tests/test_notebook_surface.py b/tests/test_notebook_surface.py index 307e2047..d4fb3822 100644 --- a/tests/test_notebook_surface.py +++ b/tests/test_notebook_surface.py @@ -3,6 +3,7 @@ from pathlib import Path import nbformat +import yaml 
REPO_ROOT = Path(__file__).resolve().parents[1] @@ -26,3 +27,13 @@ def test_notebooks_are_python_facing() -> None: def test_readme_catalog_is_python_facing() -> None: readme = (REPO_ROOT / "README.md").read_text(encoding="utf-8") assert "Notebook generated from MATLAB help source" not in readme + + +def test_confidence_interval_overview_is_catalogued() -> None: + notebook_manifest = yaml.safe_load((REPO_ROOT / "tools" / "notebooks" / "notebook_manifest.yml").read_text(encoding="utf-8")) + topics = {row["topic"] for row in notebook_manifest["notebooks"]} + assert "ConfidenceIntervalOverview" in topics + + example_manifest = yaml.safe_load((REPO_ROOT / "examples" / "nSTATPaperExamples" / "manifest.yml").read_text(encoding="utf-8")) + names = {row["name"] for row in example_manifest["examples"]} + assert "ConfidenceIntervalOverview" in names diff --git a/tests/test_paper_example_scripts.py b/tests/test_paper_example_scripts.py new file mode 100644 index 00000000..dfb99b93 --- /dev/null +++ b/tests/test_paper_example_scripts.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +import subprocess +import sys +from pathlib import Path + +import pytest +import yaml + +import nstat.data_manager as data_manager + + +REPO_ROOT = Path(__file__).resolve().parents[1] +MANIFEST_PATH = REPO_ROOT / "examples" / "paper" / "manifest.yml" + +EXPECTED_SCRIPT_NAMES = { + "example01_mepsc_poisson", + "example02_whisker_stimulus_thalamus", + "example03_psth_and_ssglm", + "example04_place_cells_continuous_stimulus", + "example05_decoding_ppaf_pphf", +} + + +def test_paper_example_manifest_covers_canonical_scripts() -> None: + payload = yaml.safe_load(MANIFEST_PATH.read_text(encoding="utf-8")) + entries = payload["examples"] + names = {row["name"] for row in entries} + assert names == EXPECTED_SCRIPT_NAMES + assert [row["example_id"] for row in entries] == ["example01", "example02", "example03", "example04", "example05"] + + +def test_paper_example_scripts_exist_and_support_help() -> 
None: + payload = yaml.safe_load(MANIFEST_PATH.read_text(encoding="utf-8")) + for row in payload["examples"]: + script_path = REPO_ROOT / row["script"] + assert script_path.exists(), f"Missing paper example script: {script_path}" + proc = subprocess.run( + [sys.executable, str(script_path), "--help"], + cwd=REPO_ROOT, + check=False, + capture_output=True, + text=True, + ) + assert proc.returncode == 0, proc.stderr + assert "--repo-root" in proc.stdout + assert "--export-figures" in proc.stdout + + +def _manifest_rows() -> list[dict[str, object]]: + payload = yaml.safe_load(MANIFEST_PATH.read_text(encoding="utf-8")) + return list(payload["examples"]) + + +def _run_export_smoke(row: dict[str, object], tmp_path: Path) -> None: + example_id = str(row["example_id"]) + script_path = REPO_ROOT / str(row["script"]) + export_dir = tmp_path / example_id + proc = subprocess.run( + [sys.executable, str(script_path), "--export-figures", "--export-dir", str(export_dir)], + cwd=REPO_ROOT, + check=False, + capture_output=True, + text=True, + ) + assert proc.returncode == 0, proc.stderr + for rel_path in row["figure_files"]: + filename = Path(rel_path).name + assert (export_dir / filename).exists(), f"Missing exported figure {filename} for {example_id}" + + +def test_data_free_canonical_example_supports_figure_export(tmp_path: Path) -> None: + rows = _manifest_rows() + example05 = next(row for row in rows if row["example_id"] == "example05") + _run_export_smoke(example05, tmp_path) + + +def test_dataset_backed_canonical_examples_support_figure_export_when_data_available(tmp_path: Path) -> None: + if not data_manager.data_is_present(data_manager.get_data_dir()): + pytest.skip("Dataset-backed paper example export smoke requires preinstalled example data.") + + rows = _manifest_rows() + dataset_backed = [row for row in rows if row["example_id"] != "example05"] + for row in dataset_backed: + _run_export_smoke(row, tmp_path) diff --git a/tests/test_paper_figure_assets.py 
b/tests/test_paper_figure_assets.py new file mode 100644 index 00000000..ad6cadf4 --- /dev/null +++ b/tests/test_paper_figure_assets.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from pathlib import Path + +import matplotlib.image as mpimg +import yaml + +from nstat.paper_figures import list_named_paper_figures + + +REPO_ROOT = Path(__file__).resolve().parents[1] +MANIFEST_PATH = REPO_ROOT / "examples" / "paper" / "manifest.yml" + + +def _load_examples() -> list[dict[str, object]]: + payload = yaml.safe_load(MANIFEST_PATH.read_text(encoding="utf-8")) + return list(payload["examples"]) + + +def test_figure_builder_registry_matches_manifest() -> None: + for row in _load_examples(): + expected = [Path(path).name for path in row["figure_files"]] + assert list_named_paper_figures(row["example_id"]) == expected + + +def test_committed_png_sets_match_manifest_exactly() -> None: + for row in _load_examples(): + example_id = row["example_id"] + figure_dir = REPO_ROOT / "docs" / "figures" / example_id + expected = sorted(Path(path).name for path in row["figure_files"]) + committed = sorted(path.name for path in figure_dir.glob("*.png")) + assert committed == expected + + +def test_committed_pngs_are_readable_images() -> None: + for row in _load_examples(): + for rel_path in row["figure_files"]: + image = mpimg.imread(REPO_ROOT / rel_path) + assert image.ndim in (2, 3) + assert image.shape[0] > 100 + assert image.shape[1] > 100 + + +def test_gallery_readmes_embed_every_expected_figure() -> None: + for row in _load_examples(): + example_id = row["example_id"] + readme = (REPO_ROOT / "docs" / "figures" / example_id / "README.md").read_text(encoding="utf-8") + for rel_path in row["figure_files"]: + filename = Path(rel_path).name + stem = Path(rel_path).stem + assert f"### {stem}" in readme + assert f"![{stem}](./{filename})" in readme diff --git a/tests/test_paper_gallery_docs.py new file mode 100644 index 00000000..be117abe 
--- /dev/null +++ b/tests/test_paper_gallery_docs.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import json +from pathlib import Path + +import yaml + +from nstat.paper_gallery import build_gallery_manifest, render_paper_examples_markdown + + +REPO_ROOT = Path(__file__).resolve().parents[1] + + +def test_paper_gallery_manifest_is_generated_from_source_manifest() -> None: + committed = json.loads((REPO_ROOT / "docs" / "figures" / "manifest.json").read_text(encoding="utf-8")) + built = build_gallery_manifest(REPO_ROOT) + assert committed == built + + +def test_paper_examples_markdown_matches_generator() -> None: + committed = (REPO_ROOT / "docs" / "paper_examples.md").read_text(encoding="utf-8") + assert committed == render_paper_examples_markdown(REPO_ROOT) + + +def test_gallery_manifest_tracks_thumbnail_and_run_command_fields() -> None: + built = build_gallery_manifest(REPO_ROOT) + for row in built["examples"]: + assert row["thumbnail_file"] == row["figure_files"][0] + assert row["thumbnail_file"].startswith("docs/figures/example") + assert row["run_command"].startswith("python examples/paper/") + + +def test_generated_gallery_markdown_includes_thumbnail_rows_and_run_commands() -> None: + text = render_paper_examples_markdown(REPO_ROOT) + payload = yaml.safe_load((REPO_ROOT / "examples" / "paper" / "manifest.yml").read_text(encoding="utf-8")) or {} + for index, row in enumerate(payload.get("examples", []), start=1): + example_label = f"Example {index:02d}" + thumbnail = str(row["figure_files"][0]).replace("docs/", "") + script = str(row["script"]) + assert f"![{example_label}]({thumbnail})" in text + assert f"`python {script}`" in text + + +def test_paper_gallery_directories_exist() -> None: + for example_id in ("example01", "example02", "example03", "example04", "example05"): + path = REPO_ROOT / "docs" / "figures" / example_id + assert path.is_dir() + assert (path / "README.md").exists() + + +def 
test_mapped_gallery_directories_have_committed_figure_sets() -> None: + parity = yaml.safe_load((REPO_ROOT / "parity" / "manifest.yml").read_text(encoding="utf-8")) or {} + paper_manifest = yaml.safe_load((REPO_ROOT / "examples" / "paper" / "manifest.yml").read_text(encoding="utf-8")) or {} + figure_map = {row["example_id"]: row["figure_files"] for row in paper_manifest.get("examples", [])} + + for row in parity.get("docs_gallery", []): + target = str(row.get("python_target", "")) + if not target.startswith("docs/figures/example") or row.get("status") != "mapped": + continue + example_id = Path(target).name + for rel_path in figure_map.get(example_id, []): + assert (REPO_ROOT / rel_path).exists(), f"Missing committed gallery figure for mapped example: {rel_path}" diff --git a/tests/test_parity_manifest.py b/tests/test_parity_manifest.py new file mode 100644 index 00000000..871a354b --- /dev/null +++ b/tests/test_parity_manifest.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +from pathlib import Path + +import yaml + + +REPO_ROOT = Path(__file__).resolve().parents[1] +MANIFEST_PATH = REPO_ROOT / "parity" / "manifest.yml" + +EXPECTED_MATLAB_PUBLIC_API = { + "Analysis", + "CIF", + "ConfidenceInterval", + "ConfigColl", + "CovColl", + "Covariate", + "DecodingAlgorithms", + "Events", + "FitResSummary", + "FitResult", + "History", + "SignalObj", + "Trial", + "TrialConfig", + "getPaperDataDirs", + "nSTAT_Install", + "nspikeTrain", + "nstColl", + "nstatOpenHelpPage", +} + +EXPECTED_HELP_WORKFLOWS = { + "AnalysisExamples", + "AnalysisExamples2", + "ConfidenceIntervalOverview", + "ConfigCollExamples", + "CovCollExamples", + "CovariateExamples", + "DecodingExample", + "DecodingExampleWithHist", + "EventsExamples", + "ExplicitStimulusWhiskerData", + "FitResSummaryExamples", + "FitResultExamples", + "FitResultReference", + "HippocampalPlaceCellExample", + "HistoryExamples", + "HybridFilterExample", + "NetworkTutorial", + "PPSimExample", + "PPThinning", + 
"PSTHEstimation", + "SignalObjExamples", + "StimulusDecode2D", + "TrialConfigExamples", + "TrialExamples", + "ValidationDataSet", + "mEPSCAnalysis", + "nSTATPaperExamples", + "nSpikeTrainExamples", + "nstCollExamples", +} + +VALID_STATUSES = {"mapped", "partial", "missing", "not_applicable"} + + +def _load_manifest() -> dict: + payload = yaml.safe_load(MANIFEST_PATH.read_text(encoding="utf-8")) + assert isinstance(payload, dict) + return payload + + +def test_parity_manifest_public_api_coverage() -> None: + payload = _load_manifest() + entries = payload["public_api"] + names = {row["matlab"] for row in entries} + assert names == EXPECTED_MATLAB_PUBLIC_API + + +def test_parity_manifest_help_workflow_coverage() -> None: + payload = _load_manifest() + entries = payload["help_workflows"] + names = {row["matlab"] for row in entries} + assert names == EXPECTED_HELP_WORKFLOWS + + +def test_parity_manifest_statuses_and_mapped_targets_are_valid() -> None: + payload = _load_manifest() + for section_name in ("public_api", "help_workflows", "paper_examples", "docs_gallery", "installer_setup", "repo_structure"): + for row in payload[section_name]: + status = row["status"] + assert status in VALID_STATUSES + target = row.get("python_target") + if status == "mapped": + assert target, f"Mapped item in {section_name} is missing a python_target: {row}" diff --git a/tests/test_parity_report.py b/tests/test_parity_report.py new file mode 100644 index 00000000..d67d49ca --- /dev/null +++ b/tests/test_parity_report.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from pathlib import Path + +from nstat.parity_report import render_parity_report + + +REPO_ROOT = Path(__file__).resolve().parents[1] +REPORT_PATH = REPO_ROOT / "parity" / "report.md" + + +def test_committed_parity_report_matches_generator() -> None: + committed = REPORT_PATH.read_text(encoding="utf-8") + assert committed == render_parity_report(REPO_ROOT) + + +def test_parity_report_highlights_current_constraints() 
-> None: + text = REPORT_PATH.read_text(encoding="utf-8") + assert "no missing MATLAB public APIs remain" in text + assert "paper examples and docs gallery" in text.lower() + assert "all canonical paper examples and committed gallery directories are mapped" in text + assert "No partial or missing items remain." in text + assert "nstatOpenHelpPage" in text diff --git a/tests/test_readme_examples_catalog.py b/tests/test_readme_examples_catalog.py index 432e837c..2e4d13c3 100644 --- a/tests/test_readme_examples_catalog.py +++ b/tests/test_readme_examples_catalog.py @@ -8,19 +8,21 @@ REPO_ROOT = Path(__file__).resolve().parents[1] README_PATH = REPO_ROOT / "README.md" -CATALOG_PATH = REPO_ROOT / "examples" / "nSTATPaperExamples" / "manifest.yml" - - -FEATURED_HEADINGS = [ - "### Example 1 — Single sinusoid: signal + multitaper spectrum + spectrogram", - "### Example 2 — Time-varying CIF over 10 seconds (single-frequency sinusoid)", - "### Example 3 — Spike train collection raster from Example 2", -] - -FEATURED_RUN_COMMANDS = [ - "python examples/readme_examples/example1_multitaper_and_spectrogram.py", - "python examples/readme_examples/example2_simulate_cif_spiketrain_10s.py", - "python examples/readme_examples/example3_nstcoll_raster_from_example2.py", +PAPER_MANIFEST_PATH = REPO_ROOT / "examples" / "paper" / "manifest.yml" + +SUPPLEMENTARY_EXAMPLES = [ + ( + "python examples/readme_examples/example1_multitaper_and_spectrogram.py", + "[PNG](examples/readme_examples/images/readme_example1_multitaper_and_spectrogram.png)", + ), + ( + "python examples/readme_examples/example2_simulate_cif_spiketrain_10s.py", + "[PNG](examples/readme_examples/images/readme_example2_simulate_cif_spiketrain_10s.png)", + ), + ( + "python examples/readme_examples/example3_nstcoll_raster_from_example2.py", + "[PNG](examples/readme_examples/images/readme_example3_nstcoll_raster.png)", + ), ] @@ -31,45 +33,42 @@ def _extract_examples_block(text: str) -> str: return match.group(1) -def 
test_readme_featured_examples_are_preserved_in_order() -> None: - readme = README_PATH.read_text(encoding="utf-8") - block = _extract_examples_block(readme) - - heading_positions = [] - for heading in FEATURED_HEADINGS: - pos = block.find(heading) - assert pos >= 0, f"Missing featured heading: {heading}" - heading_positions.append(pos) - assert heading_positions == sorted(heading_positions), "Featured examples must remain in the original order." - - for cmd in FEATURED_RUN_COMMANDS: - assert cmd in block, f"Missing featured run command: {cmd}" - +def test_readme_examples_headings_match_gallery_layout() -> None: + block = _extract_examples_block(README_PATH.read_text(encoding="utf-8")) + headings = re.findall(r"^###\s+.+$", block, flags=re.M) + assert headings == [ + "### Paper Examples (Self-Contained)", + "### Supplementary Examples", + ] + assert "python tools/paper_examples/build_gallery.py" in block -def test_readme_includes_complete_nstatpaperexamples_catalog_once() -> None: - readme = README_PATH.read_text(encoding="utf-8") - block = _extract_examples_block(readme) - assert "### nSTATPaperExamples" in block, "README Examples section is missing the nSTATPaperExamples catalog header." - manifest = yaml.safe_load(CATALOG_PATH.read_text(encoding="utf-8")) or {} +def test_readme_paper_example_rows_track_manifest() -> None: + block = _extract_examples_block(README_PATH.read_text(encoding="utf-8")) + manifest = yaml.safe_load(PAPER_MANIFEST_PATH.read_text(encoding="utf-8")) or {} entries = manifest.get("examples", []) - assert entries, "nSTATPaperExamples manifest has no entries." - - for row in entries: - name = str(row["name"]) - rel_path = str(row["relative_path"]) - link = f"[{name}]({rel_path})" - count = block.count(link) - assert count == 1, f"Catalog entry must appear exactly once in README: {link} (found {count})." 
- - -def test_readme_examples_section_has_no_other_example_groups() -> None: - readme = README_PATH.read_text(encoding="utf-8") - block = _extract_examples_block(readme) - - headings = re.findall(r"^###\s+.+$", block, flags=re.M) - expected = FEATURED_HEADINGS + ["### nSTATPaperExamples"] - assert headings == expected, ( - "README Examples section must contain only the three featured examples " - "followed by the nSTATPaperExamples catalog header." - ) + assert entries, "Paper example manifest has no entries." + + for index, row in enumerate(entries, start=1): + script = str(row["script"]) + question = str(row["question"]) + example_label = f"Example {index:02d}" + thumbnail = str(row["figure_files"][0]) + assert example_label in block + assert question in block + assert f"![{example_label}]({thumbnail})" in block + assert f"`python {script}`" in block + assert f"[Script]({script})" in block + assert f"[Figures](docs/figures/{row['example_id']}/)" in block + + +def test_readme_supplementary_examples_are_preserved() -> None: + block = _extract_examples_block(README_PATH.read_text(encoding="utf-8")) + + positions: list[int] = [] + for command, png_link in SUPPLEMENTARY_EXAMPLES: + pos = block.find(command) + assert pos >= 0, f"Missing supplementary example command: {command}" + positions.append(pos) + assert png_link in block + assert positions == sorted(positions), "Supplementary examples must remain in README order." 
diff --git a/tests/test_readme_nstatpaperexamples.py b/tests/test_readme_nstatpaperexamples.py index 9e2cb916..a76e88d1 100644 --- a/tests/test_readme_nstatpaperexamples.py +++ b/tests/test_readme_nstatpaperexamples.py @@ -8,11 +8,21 @@ README_PATH = REPO_ROOT / "README.md" -def test_readme_includes_nstatpaperexamples_code_and_figure() -> None: +def test_readme_links_to_generated_paper_gallery_and_figure_dirs() -> None: text = README_PATH.read_text(encoding="utf-8") - match = re.search(r"### nSTATPaperExamples\n(.*?)\n## Documentation\n", text, flags=re.S) - assert match, "README is missing the nSTATPaperExamples block." + match = re.search(r"## Examples\n(.*?)\n## Documentation\n", text, flags=re.S) + assert match, "README is missing the paper examples block." block = match.group(1) - assert "examples/readme_examples/example4_nstatpaperexamples_overview.py" in block - assert "from nstat.paper_examples import run_paper_examples" in block - assert "readme_example4_nstatpaperexamples_overview.png" in block + + assert "[docs/paper_examples.md](docs/paper_examples.md)" in block + + for example_id in ("example01", "example02", "example03", "example04", "example05"): + link = f"[Figures](docs/figures/{example_id}/)" + assert block.count(link) == 1, f"README must link each canonical figure directory exactly once: {link}" + + +def test_readme_no_longer_uses_legacy_nstatpaperexamples_overview_block() -> None: + text = README_PATH.read_text(encoding="utf-8") + assert "### nSTATPaperExamples" not in text + assert "example4_nstatpaperexamples_overview.py" not in text + assert "readme_example4_nstatpaperexamples_overview.png" not in text diff --git a/tests/test_repo_layout.py b/tests/test_repo_layout.py new file mode 100644 index 00000000..7e9f9cab --- /dev/null +++ b/tests/test_repo_layout.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from pathlib import Path + + +REPO_ROOT = Path(__file__).resolve().parents[1] + + +def 
test_repo_uses_single_canonical_package_layout() -> None: + assert (REPO_ROOT / "nstat").is_dir() + assert not (REPO_ROOT / "src" / "nstat").exists() + assert not (REPO_ROOT / "__init__.py").exists() diff --git a/tools/notebooks/notebook_manifest.yml b/tools/notebooks/notebook_manifest.yml index 407a5925..6ab169cc 100644 --- a/tools/notebooks/notebook_manifest.yml +++ b/tools/notebooks/notebook_manifest.yml @@ -78,6 +78,9 @@ notebooks: - topic: AnalysisExamples2 file: notebooks/AnalysisExamples2.ipynb run_group: full +- topic: ConfidenceIntervalOverview + file: notebooks/ConfidenceIntervalOverview.ipynb + run_group: smoke - topic: FitResultReference file: notebooks/FitResultReference.ipynb run_group: full diff --git a/tools/notebooks/topic_groups.yml b/tools/notebooks/topic_groups.yml index 9d2a2961..5b863106 100644 --- a/tools/notebooks/topic_groups.yml +++ b/tools/notebooks/topic_groups.yml @@ -1,8 +1,10 @@ version: 1 groups: ci_smoke: + - ConfidenceIntervalOverview - ConfigCollExamples - CovariateExamples + - nSTATPaperExamples - TrialConfigExamples smoke: - AnalysisExamples diff --git a/tools/paper_examples/build_gallery.py b/tools/paper_examples/build_gallery.py new file mode 100644 index 00000000..c516d103 --- /dev/null +++ b/tools/paper_examples/build_gallery.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.paper_gallery import write_gallery_outputs + + +if __name__ == "__main__": + manifest_path, markdown_path = write_gallery_outputs(REPO_ROOT) + print(manifest_path) + print(markdown_path) diff --git a/tools/parity/build_report.py b/tools/parity/build_report.py new file mode 100644 index 00000000..d22c0bc3 --- /dev/null +++ b/tools/parity/build_report.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import sys +from pathlib import Path 
+ + +THIS_DIR = Path(__file__).resolve().parent +REPO_ROOT = THIS_DIR.parents[1] +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from nstat.parity_report import write_parity_report + + +if __name__ == "__main__": + print(write_parity_report(REPO_ROOT))