Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
430 changes: 430 additions & 0 deletions docs/GETTING_STARTED.md

Large diffs are not rendered by default.

423 changes: 423 additions & 0 deletions neurolens.ipynb

Large diffs are not rendered by default.

28 changes: 28 additions & 0 deletions neurolens/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
"""NeuroLens: Interactive neuroscience playground built on TRIBE v2.

Package entry point: re-exports the public API from the submodules so users
can reach everything via ``import neurolens``. ``__all__`` below mirrors
these imports one-to-one.
"""

# Cached-data access (brain predictions, ROI summaries, model embeddings).
from neurolens.cache import CacheManager
# Stimulus objects and the library of available stimuli.
from neurolens.stimulus import Stimulus, StimulusLibrary
# Per-timestep brain-prediction queries.
from neurolens.predict import get_prediction_at_time, get_num_timesteps, get_top_rois
# Stimulus search: similarity, region-targeted, and contrast lookups.
from neurolens.match import find_similar_stimuli, build_target_from_regions, find_contrast_stimuli
# RSA-based model-to-brain alignment scoring.
from neurolens.eval import compute_all_model_alignments, compute_model_brain_alignment
# ROI group definitions and aggregation helpers.
from neurolens.roi import ROI_GROUPS, get_roi_group_names, summarize_by_roi_group
# Plotting utilities.
from neurolens.viz import plot_brain_surface, make_radar_chart

# Public API — keep in sync with the imports above.
__all__ = [
    "CacheManager",
    "Stimulus",
    "StimulusLibrary",
    "get_prediction_at_time",
    "get_num_timesteps",
    "get_top_rois",
    "find_similar_stimuli",
    "build_target_from_regions",
    "find_contrast_stimuli",
    "compute_all_model_alignments",
    "compute_model_brain_alignment",
    "ROI_GROUPS",
    "get_roi_group_names",
    "summarize_by_roi_group",
    "plot_brain_surface",
    "make_radar_chart",
]
66 changes: 66 additions & 0 deletions neurolens/cache.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
"""CacheManager: load pre-computed brain predictions, ROI summaries, and embeddings."""

import json
from pathlib import Path

import numpy as np
import torch


class CacheManager:
"""Loads cached data from the NeuroLens cache directory.

Expected layout::

cache_dir/
├── brain_preds/{stimulus_id}.npz (key: "preds")
├── roi_summaries/{stimulus_id}.json
└── embeddings/{model_name}/{stimulus_id}.pt
"""

def __init__(self, cache_dir: str | Path) -> None:
self.cache_dir = Path(cache_dir)

def load_brain_preds(self, stimulus_id: str) -> np.ndarray | None:
"""Load brain predictions array of shape (n_timesteps, n_vertices).

Returns None if the file doesn't exist.
"""
path = self.cache_dir / "brain_preds" / f"{stimulus_id}.npz"
if not path.exists():
return None
return np.load(path)["preds"]

def load_roi_summary(self, stimulus_id: str) -> dict[str, float] | None:
"""Load per-ROI-group mean activations.

Returns None if the file doesn't exist.
"""
path = self.cache_dir / "roi_summaries" / f"{stimulus_id}.json"
if not path.exists():
return None
return json.loads(path.read_text())

def load_embedding(self, stimulus_id: str, model_name: str) -> torch.Tensor | None:
"""Load a model embedding tensor.

Returns None if the file doesn't exist.
"""
path = self.cache_dir / "embeddings" / model_name / f"{stimulus_id}.pt"
if not path.exists():
return None
return torch.load(path, map_location="cpu", weights_only=True)

def available_models(self) -> list[str]:
"""Return sorted list of model names that have cached embeddings."""
emb_dir = self.cache_dir / "embeddings"
if not emb_dir.exists():
return []
return sorted(d.name for d in emb_dir.iterdir() if d.is_dir())

def all_brain_pred_ids(self) -> list[str]:
"""Return stimulus ids that have cached brain predictions."""
preds_dir = self.cache_dir / "brain_preds"
if not preds_dir.exists():
return []
return sorted(p.stem for p in preds_dir.glob("*.npz"))
74 changes: 74 additions & 0 deletions neurolens/eval.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
"""Eval module: RSA-based comparison of AI model embeddings to brain predictions."""

from __future__ import annotations

import numpy as np
from scipy.stats import spearmanr

from neurolens.cache import CacheManager


def compute_pairwise_similarity_matrix(vectors: list[np.ndarray]) -> np.ndarray:
    """Compute pairwise cosine similarity matrix for a list of vectors.

    Each vector is L2-normalized (zero vectors are left as zeros rather than
    dividing by zero), then all pairwise dot products are taken at once.
    Returns np.ndarray of shape (n, n).
    """
    stacked = np.stack(vectors)
    lengths = np.linalg.norm(stacked, axis=1, keepdims=True)
    # A zero-length row would divide by zero; substituting 1.0 leaves the
    # row as all zeros, so its similarity to everything is 0.
    safe_lengths = np.where(lengths == 0, 1.0, lengths)
    unit = stacked / safe_lengths
    return np.einsum("id,jd->ij", unit, unit)


def compute_rsa_score(
    sim_matrix_a: np.ndarray,
    sim_matrix_b: np.ndarray,
) -> float:
    """Compute RSA score: Spearman correlation between upper triangles.

    Both arguments are square similarity matrices of the same shape. Only
    the strict upper triangle (k=1) is compared, since similarity matrices
    are symmetric and their diagonals are trivially 1.

    Returns float: Spearman correlation coefficient in [-1, 1]. Returns 0.0
    when the correlation is undefined — fewer than two off-diagonal pairs,
    or a constant upper triangle — cases where spearmanr yields NaN, which
    would otherwise poison downstream sorting of scores.
    """
    n = sim_matrix_a.shape[0]
    idx = np.triu_indices(n, k=1)
    vec_a = sim_matrix_a[idx]
    vec_b = sim_matrix_b[idx]
    corr, _ = spearmanr(vec_a, vec_b)
    # spearmanr returns NaN for degenerate input; map it to a neutral 0.0
    # so callers comparing/sorting scores get well-defined behavior.
    if np.isnan(corr):
        return 0.0
    return float(corr)


def compute_model_brain_alignment(
    cache: CacheManager,
    model_name: str,
    stimulus_ids: list[str],
) -> float:
    """Compute overall brain alignment score for a model using RSA.

    For each stimulus that has both a cached embedding and cached brain
    predictions, pairs the embedding with the time-averaged brain response,
    then compares the two pairwise-similarity structures via RSA.

    Returns float: RSA alignment score in [-1, 1]; 0.0 when fewer than
    three usable stimuli are available.
    """
    model_vecs: list[np.ndarray] = []
    brain_means: list[np.ndarray] = []
    for stim_id in stimulus_ids:
        embedding = cache.load_embedding(stim_id, model_name)
        brain = cache.load_brain_preds(stim_id)
        # Skip stimuli missing either artifact so the two lists stay paired.
        if embedding is None or brain is None:
            continue
        model_vecs.append(embedding.numpy())
        # Average over timesteps -> one brain-response vector per stimulus.
        brain_means.append(brain.mean(axis=0))

    # RSA needs at least 3 items to yield a meaningful rank correlation.
    if len(model_vecs) < 3:
        return 0.0

    return compute_rsa_score(
        compute_pairwise_similarity_matrix(model_vecs),
        compute_pairwise_similarity_matrix(brain_means),
    )


def compute_all_model_alignments(
    cache: CacheManager,
    stimulus_ids: list[str],
) -> dict[str, float]:
    """Compute brain alignment scores for all available models.

    Scores every model with cached embeddings against the brain predictions
    for the given stimuli.

    Returns dict mapping model_name to RSA score, sorted descending by score
    (dicts preserve insertion order, so iteration yields best model first).
    """
    scores = {
        name: compute_model_brain_alignment(cache, name, stimulus_ids)
        for name in cache.available_models()
    }
    ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    return dict(ranked)
Loading