From 61d641e2319ab9d8b79d0714e3c20df20e9fd4ff Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 26 Aug 2025 15:06:57 +0000
Subject: [PATCH 1/2] Initial plan

From 0886284bb92b985c2fef8358358631fb69008a74 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 26 Aug 2025 15:18:18 +0000
Subject: [PATCH 2/2] Fix torch.nonzero inconsistency and n-gram range bug in evaluation.py

Co-authored-by: Hananel-Hazan <3954715+Hananel-Hazan@users.noreply.github.com>
---
 bindsnet/evaluation/evaluation.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/bindsnet/evaluation/evaluation.py b/bindsnet/evaluation/evaluation.py
index 5271d762..950ad05d 100644
--- a/bindsnet/evaluation/evaluation.py
+++ b/bindsnet/evaluation/evaluation.py
@@ -195,13 +195,13 @@ def ngram(
 
         # Aggregate all of the firing neurons' indices
         fire_order = []
-        for t in range(activity.size()[0]):
-            ordering = torch.nonzero(activity[t].view(-1))
+        for t in range(activity.size(0)):
+            ordering = torch.nonzero(activity[t]).view(-1)
             if ordering.numel() > 0:
-                fire_order += ordering[:, 0].tolist()
+                fire_order += ordering.tolist()
 
         # Consider all n-gram sequences.
-        for j in range(len(fire_order) - n):
+        for j in range(len(fire_order) - n + 1):
             if tuple(fire_order[j : j + n]) in ngram_scores:
                 score += ngram_scores[tuple(fire_order[j : j + n])]
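
Reviewer note (illustrative only, not part of the patch): a minimal sketch of the fixed loop behavior, assuming `activity` is a (time, n_neurons) spike tensor. The toy tensor, the choice of n = 2, and the printed values are assumptions made for demonstration.

import torch

# Toy spike activity: rows are time steps, columns are neurons (assumed layout).
activity = torch.tensor(
    [[0, 1, 0],   # t=0: neuron 1 fires
     [1, 0, 1],   # t=1: neurons 0 and 2 fire
     [0, 0, 1]],  # t=2: neuron 2 fires
)

fire_order = []
for t in range(activity.size(0)):
    # torch.nonzero returns an (N, 1) index tensor for a 1-D input;
    # .view(-1) flattens it so .tolist() yields plain integer indices.
    ordering = torch.nonzero(activity[t]).view(-1)
    if ordering.numel() > 0:
        fire_order += ordering.tolist()

print(fire_order)  # [1, 0, 2, 2]

n = 2
# With the old bound "len(fire_order) - n" the final 2-gram (2, 2) is skipped;
# "- n + 1" enumerates every consecutive n-gram, including the last one.
ngrams = [tuple(fire_order[j : j + n]) for j in range(len(fire_order) - n + 1)]
print(ngrams)  # [(1, 0), (0, 2), (2, 2)]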