Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -197,3 +197,18 @@ jobs:
python -m pip install -e .[dev]
- name: Run API surface checks
run: pytest -q tests/test_api_surface.py

symbol-surface-audit:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install -e .[dev]
- name: Verify audited MATLAB-facing runtime surface
run: pytest -q tests/test_matlab_symbol_surface.py tests/test_class_fidelity_audit.py tests/test_parity_report.py
119 changes: 119 additions & 0 deletions nstat/class_fidelity.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
from __future__ import annotations

import importlib
from pathlib import Path
from typing import Any

import yaml


# Callable members that each audited MATLAB-facing public class must expose at
# runtime for its audit row to count as verified.  row_runtime_symbol_verified
# probes each name with getattr(...) and requires the result to be callable.
# NOTE(review): these lists are assumed to mirror the MATLAB class surfaces
# tracked in parity/class_fidelity.yml — confirm when methods are added/removed.
EXPECTED_RUNTIME_MEMBERS: dict[str, tuple[str, ...]] = {
    "nstat.Analysis": (
        "GLMFit",
        "RunAnalysisForNeuron",
        "RunAnalysisForAllNeurons",
        "KSPlot",
        "computeKSStats",
        "computeFitResidual",
        "plotFitResidual",
        "plotInvGausTrans",
        "plotSeqCorr",
        "plotCoeffs",
    ),
    "nstat.CIF": (
        "setSpikeTrain",
        "setHistory",
        "simulateCIF",
        "simulateCIFByThinning",
        "simulateCIFByThinningFromLambda",
        "evalGradient",
        "evalGradientLog",
        "evalJacobian",
        "evalJacobianLog",
        "evalGradientLDGamma",
        "evalJacobianLDGamma",
    ),
    "nstat.DecodingAlgorithms": (
        "PPDecode_predict",
        "PPDecode_update",
        "PPDecode_updateLinear",
        "PPDecodeFilterLinear",
        "PPDecodeFilter",
        "PP_fixedIntervalSmoother",
        "PPHybridFilterLinear",
        "PPHybridFilter",
    ),
}


def _repo_root() -> Path:
return Path(__file__).resolve().parents[1]


def load_class_fidelity_audit(repo_root: Path | None = None) -> dict[str, Any]:
    """Parse ``parity/class_fidelity.yml`` under *repo_root*.

    Falls back to this repository's own root when *repo_root* is ``None``.
    """
    if repo_root is None:
        base = _repo_root()
    else:
        base = repo_root.resolve()
    audit_path = base / "parity" / "class_fidelity.yml"
    return yaml.safe_load(audit_path.read_text(encoding="utf-8"))


def resolve_public_symbol(dotted_name: str | None) -> Any | None:
    """Resolve a dotted import path (e.g. ``"nstat.Analysis"``) to a live object.

    Returns ``None`` when *dotted_name* is empty or cannot be resolved — a
    missing module or attribute no longer raises, so callers (such as
    row_runtime_symbol_verified) can treat resolution failure as an
    unverified symbol instead of crashing the audit.  The longest importable
    module prefix is imported first, so dotted submodules resolve even when
    they are not attributes of their parent package.
    """
    if not dotted_name:
        return None
    parts = [part for part in str(dotted_name).split(".") if part]
    if not parts:
        return None
    # Try progressively shorter prefixes as a module path; the remainder is
    # then walked as attributes on the imported module.
    module = None
    split = 0
    for idx in range(len(parts), 0, -1):
        try:
            module = importlib.import_module(".".join(parts[:idx]))
        except ImportError:
            continue
        split = idx
        break
    if module is None:
        return None
    obj: Any = module
    for part in parts[split:]:
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return None
    return obj


def _coerce_verified_flag(value: Any) -> bool:
if isinstance(value, bool):
return value
return str(value).strip().lower() in {"1", "true", "yes"}


def row_runtime_symbol_verified(row: dict[str, Any]) -> bool:
    """Return True when the row's public symbol resolves and every expected member is callable."""
    public_name = row.get("python_public_name")
    target = resolve_public_symbol(public_name)
    if target is None:
        return False
    for member in EXPECTED_RUNTIME_MEMBERS.get(str(public_name), ()):
        if not callable(getattr(target, member, None)):
            return False
    return True


def row_audit_symbol_verified(row: dict[str, Any]) -> bool:
    """Return the row's recorded ``symbol_presence_verified`` flag as a strict bool."""
    flag = row.get("symbol_presence_verified")
    return _coerce_verified_flag(flag)


def iter_symbol_presence_mismatches(payload: dict[str, Any]) -> list[dict[str, Any]]:
    """Collect audit rows whose recorded flag disagrees with live runtime resolution."""
    return [
        row
        for row in payload.get("items", [])
        if row_audit_symbol_verified(row) != row_runtime_symbol_verified(row)
    ]


def summarize_symbol_presence(payload: dict[str, Any]) -> dict[str, int]:
    """Tally audit rows into verified / unverified / not_applicable buckets."""
    counts = {"verified": 0, "unverified": 0, "not_applicable": 0}
    for row in payload.get("items", []):
        # Rows with no Python symbol, or explicitly excused rows, are N/A.
        if not row.get("python_public_name") or row.get("status") == "not_applicable":
            bucket = "not_applicable"
        elif row_runtime_symbol_verified(row):
            bucket = "verified"
        else:
            bucket = "unverified"
        counts[bucket] += 1
    return counts


# Explicit public surface of this module (kept alphabetically sorted).
__all__ = [
    "EXPECTED_RUNTIME_MEMBERS",
    "iter_symbol_presence_mismatches",
    "load_class_fidelity_audit",
    "resolve_public_symbol",
    "row_audit_symbol_verified",
    "row_runtime_symbol_verified",
    "summarize_symbol_presence",
]
33 changes: 32 additions & 1 deletion nstat/matlab_reference.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,8 +104,34 @@ def run_simulated_network_reference(*, matlab_repo: str | Path | None = None, se
assignin('base','S2',S{2}); assignin('base','H2',H{2}); assignin('base','E2',E{2}); assignin('base','mu2',mu{2});
options = simget;
[tout,~,yout] = sim('SimulatedNetwork2',[stim.minTime stim.maxTime],options,stim.dataToStructure);
[h1Num, ~] = tfdata(H{1}, 'v');
[h2Num, ~] = tfdata(H{2}, 'v');
[s1Num, ~] = tfdata(S{1}, 'v');
[s2Num, ~] = tfdata(S{2}, 'v');
[e1Num, ~] = tfdata(E{1}, 'v');
[e2Num, ~] = tfdata(E{2}, 'v');
stateMat = yout(:,1:2);
probMat = zeros(size(stateMat));
for n = 1:size(stateMat, 1)
hist1 = 0; hist2 = 0;
for lag = 1:length(h1Num)
if n-lag >= 1
hist1 = hist1 + h1Num(lag) * stateMat(n-lag,1);
hist2 = hist2 + h2Num(lag) * stateMat(n-lag,2);
end
end
ens1 = 0; ens2 = 0;
if n > 1
ens1 = e1Num(1) * stateMat(n-1,2);
ens2 = e2Num(1) * stateMat(n-1,1);
end
eta1 = mu{1} + hist1 + s1Num(1) * u(n) + ens1;
eta2 = mu{2} + hist2 + s2Num(1) * u(n) + ens2;
probMat(n,1) = exp(eta1) / (1 + exp(eta1));
probMat(n,2) = exp(eta2) / (1 + exp(eta2));
end
netSpikeCounts = [sum(yout(:,1)>.5), sum(yout(:,2)>.5)];
netProbHead = yout(1:5,3:4);
netProbHead = probMat(1:5,:);
netStateHead = yout(1:5,1:2);
netActual = [0 1; -4 0];
""",
Expand Down Expand Up @@ -134,11 +160,14 @@ def run_analysis_reference(*, matlab_repo: str | Path | None = None) -> dict[str
cfg = TrialConfig({{'Stimulus', 'stim'}}, 10, [], []);
cfg.setName('stim');
fit = Analysis.RunAnalysisForNeuron(trial, 1, ConfigColl({cfg}));
summary = FitResSummary({fit});
analysisAIC = fit.AIC(1);
analysisBIC = fit.BIC(1);
analysisLogLL = fit.logLL(1);
analysisCoeffs = fit.getCoeffs(1)';
analysisLambdaHead = fit.lambda.data(1:5, 1)';
analysisSummaryAIC = summary.AIC(1);
analysisSummaryBIC = summary.BIC(1);
""",
nargout=0,
)
Expand All @@ -148,6 +177,8 @@ def run_analysis_reference(*, matlab_repo: str | Path | None = None) -> dict[str
"logll": _to_numpy(engine.workspace["analysisLogLL"]).reshape(-1),
"coeffs": _to_numpy(engine.workspace["analysisCoeffs"]).reshape(-1),
"lambda_head": _to_numpy(engine.workspace["analysisLambdaHead"]).reshape(-1),
"summary_aic": _to_numpy(engine.workspace["analysisSummaryAIC"]).reshape(-1),
"summary_bic": _to_numpy(engine.workspace["analysisSummaryBIC"]).reshape(-1),
}


Expand Down
45 changes: 38 additions & 7 deletions nstat/parity_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@

import yaml

from nstat.class_fidelity import (
iter_symbol_presence_mismatches,
load_class_fidelity_audit,
summarize_symbol_presence,
)
from nstat.notebook_parity import (
iter_outstanding_notebook_fidelity,
load_notebook_parity_notes,
Expand Down Expand Up @@ -42,12 +47,6 @@ def load_parity_manifest(repo_root: Path | None = None) -> dict[str, Any]:
return yaml.safe_load(path.read_text(encoding="utf-8"))


def load_class_fidelity_audit(repo_root: Path | None = None) -> dict[str, Any]:
base = _repo_root() if repo_root is None else repo_root.resolve()
path = base / "parity" / "class_fidelity.yml"
return yaml.safe_load(path.read_text(encoding="utf-8"))


def _summarize_class_fidelity(payload: dict[str, Any]) -> dict[str, int]:
counts = {status: 0 for status in payload.get("status_legend", [])}
for row in payload.get("items", []):
Expand Down Expand Up @@ -86,6 +85,8 @@ def render_parity_report(repo_root: Path | None = None) -> str:
notebook_fidelity = load_notebook_parity_notes(repo_root)
simulink_fidelity = load_simulink_fidelity_audit(repo_root)
class_counts = _summarize_class_fidelity(class_fidelity)
symbol_counts = summarize_symbol_presence(class_fidelity)
symbol_mismatches = iter_symbol_presence_mismatches(class_fidelity)
notebook_counts = summarize_notebook_fidelity(notebook_fidelity)
notebook_partial = iter_outstanding_notebook_fidelity(notebook_fidelity)
simulink_counts = summarize_simulink_strategies(simulink_fidelity)
Expand All @@ -99,7 +100,7 @@ def render_parity_report(repo_root: Path | None = None) -> str:
lines = [
"# nSTAT Python Parity Report",
"",
"Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, and `tools/notebooks/parity_notes.yml`.",
"Generated from `parity/manifest.yml`, `parity/class_fidelity.yml`, `tools/notebooks/parity_notes.yml`, and live runtime inspection of the audited Python public surface.",
"",
f"- MATLAB reference: {payload['source_repositories']['matlab']}",
f"- Python target: {payload['source_repositories']['python']}",
Expand Down Expand Up @@ -130,6 +131,19 @@ def render_parity_report(repo_root: Path | None = None) -> str:
for status in class_fidelity.get("status_legend", []):
lines.append(f"| `{status}` | {class_counts.get(status, 0)} |")

lines.extend(
[
"",
"## Runtime Symbol Verification",
"",
"| Status | Count |",
"|---|---:|",
f"| `verified` | {symbol_counts['verified']} |",
f"| `unverified` | {symbol_counts['unverified']} |",
f"| `not_applicable` | {symbol_counts['not_applicable']} |",
]
)

lines.extend(
[
"",
Expand Down Expand Up @@ -183,6 +197,12 @@ def render_parity_report(repo_root: Path | None = None) -> str:
lines.append(
"- Class fidelity: mapping parity is ahead of semantic parity; the audit still reports partial fidelity for several MATLAB-facing classes and workflows."
)
if not symbol_mismatches:
lines.append("- Runtime symbol verification: every audited MATLAB-facing Python symbol marked present in `parity/class_fidelity.yml` resolves on the live public surface.")
else:
lines.append(
f"- Runtime symbol verification: {len(symbol_mismatches)} audited MATLAB-facing entries do not currently resolve on the live public surface."
)
if simulink_outstanding:
lines.append(
f"- Simulink fidelity: {len(simulink_outstanding)} Simulink-backed assets still rely on partial, fallback, or unsupported Python execution paths."
Expand Down Expand Up @@ -237,6 +257,17 @@ def render_parity_report(repo_root: Path | None = None) -> str:
detail = recommendation_text or note
lines.append(f"- `{label}` -> `{python_target}` [{row['status']}]: {detail}")

lines.extend(["", "## Runtime Symbol Drift", ""])
if not symbol_mismatches:
lines.append("No audit/runtime symbol mismatches were detected.")
else:
for row in symbol_mismatches:
label = row.get("matlab_name") or row.get("python_public_name") or row.get("matlab_path")
public_name = row.get("python_public_name") or "None"
lines.append(
f"- `{label}` -> `{public_name}`: `symbol_presence_verified` does not match live runtime resolution."
)

lines.extend(["", "## Simulink Fidelity Deltas", ""])
if not simulink_outstanding and not simulink_reference_only:
lines.append("No partial, reference-only, fallback, or unsupported Simulink execution paths remain in the audit.")
Expand Down
2 changes: 2 additions & 0 deletions nstat/release_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ def build_release_gate_commands(
"tests/test_signalobj_fidelity.py",
"tests/test_nspiketrain_fidelity.py",
"tests/test_workflow_fidelity.py",
"tests/test_class_fidelity_audit.py",
"tests/test_matlab_symbol_surface.py",
"tests/test_matlab_reference.py",
"tests/test_simulink_fidelity_audit.py",
"tests/test_parity_report.py",
Expand Down
8 changes: 7 additions & 1 deletion parity/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,15 @@ Run the combined Python plus MATLAB release gate:
nstat-release-check --matlab-repo ../nSTAT
```

Run the MATLAB-side `pyenv` fidelity suite from the sibling MATLAB repo:

```bash
matlab -batch "cd('../nSTAT'); addpath(fullfile(pwd,'tools','python')); results = runtests('tests/python_port_fidelity'); assertSuccess(results); exit"
```

Current headline status:
- Public API coverage matches the MATLAB inventory except for the explicitly non-applicable `nstatOpenHelpPage`.
- Class-fidelity auditing is tracked separately from name-mapping parity in `class_fidelity.yml`, and it remains intentionally stricter and more conservative than the mapping manifest.
- Class-fidelity auditing is tracked separately from name-mapping parity in `class_fidelity.yml`, and it now records `symbol_presence_verified` so the audit can distinguish prose parity from live runtime symbol resolution.
- Simulink-backed workflows are inventoried separately in `simulink_fidelity.yml` so model-dependent execution paths are not conflated with native Python parity.
- Help/notebook parity covers the inventoried MATLAB help workflow surface, including the top-level `NeuralSpikeAnalysis_top`, `PaperOverview`, `Examples`, and `ClassDefinitions` navigation pages.
- Canonical paper examples, gallery structure, and README/docs presentation are committed and mapped in Python.
Expand Down
Loading