From 9e0290d2a401f2deddf19597854edd2b6c80a150 Mon Sep 17 00:00:00 2001
From: LEGion-42
Date: Mon, 29 Apr 2024 00:42:39 -0400
Subject: [PATCH 01/24] fix: typos

---
 neo/rawio/neuralynxrawio/neuralynxrawio.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py
index 1f6367ee5..00283b458 100644
--- a/neo/rawio/neuralynxrawio/neuralynxrawio.py
+++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py
@@ -87,8 +87,8 @@ class NeuralynxRawIO(BaseRawIO):
         Otherwise set 0 of time to first time in dataset
     strict_gap_mode: bool, default: True
         Detect gaps using strict mode or not.
-        * strict_gap_mode = True then a gap is consider when timstamp difference between two
-          consequtive data packet is more than one sample interval.
+        * strict_gap_mode = True then a gap is consider when timestamp difference between two
+          consecutive data packet is more than one sample interval.
        * strict_gap_mode = False then a gap has an increased tolerance. Some new system with
          different clock need this option otherwise, too many gaps are detected

From 523e999d38363a0b65c3bbb7210efb02c063b6e1 Mon Sep 17 00:00:00 2001
From: LEGion-42
Date: Thu, 23 May 2024 11:58:55 -0400
Subject: [PATCH 02/24] fix: correct regex in NlxHeader, previous regex doesn't
 register multiple values feat: add header support for Neuralynx nvt files

---
 neo/rawio/neuralynxrawio/nlxheader.py | 50 +++++++++++++++++++++++++--
 1 file changed, 48 insertions(+), 2 deletions(-)

diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py
index 8d1de1ca2..d8b43feaa 100644
--- a/neo/rawio/neuralynxrawio/nlxheader.py
+++ b/neo/rawio/neuralynxrawio/nlxheader.py
@@ -66,6 +66,21 @@ def _to_bool(txt):
         ("AcquisitionSystem", "", None),
         ("ReferenceChannel", "", None),
         ("NLX_Base_Class_Type", "", None),  # in version 4 and earlier versions of Cheetah
+        ("VideoFormat", "", None),
+        ("IntensityThreshold", "", None),
+        ("RedThreshold", "", None),
+        ("GreenThreshold", "", None),
+        ("BlueThreshold", "", None),
+        ("Saturation", "", int),
+        ("Hue", "", int),
+        ("Brightness", "", int),
+        ("Contrast", "", int),
+        ("Sharpness", "", int),
+        ("DirectionOffset", "", int),
+        ("Resolution", "", None),
+        ("CameraDelay", "", int),
+        ("EnableFieldEstimation", "field_estimation_enabled", _to_bool),
+        ("TargetDist", "", None),
     ]

    # Filename and datetime may appear in header lines starting with # at
@@ -170,16 +185,17 @@ def read_properties(self, filename, txt_header):
         :param filename: name of ncs file, used for extracting channel number
         :param txt_header: header text
         """
+        print(txt_header)
         # find keys
         for k1, k2, type_ in NlxHeader.txt_header_keys:
-            pattern = r"-(?P<name>" + k1 + r")\s+(?P<value>[\S ]*)"
+            pattern = r"-(?P<name>" + k1 + r")\s+(?P<value>.+)"
             matches = re.findall(pattern, txt_header)
             for match in matches:
                 if k2 == "":
                     name = match[0]
                 else:
                     name = k2
-                value = match[1].rstrip(" ")
+                value = match[1].replace("\t", " ").replace("\r", "").rstrip(" ")
                 if type_ is not None:
                     value = type_(value)
                 self[name] = value
@@ -243,6 +259,36 @@ def read_properties(self, filename, txt_header):
             assert len(self["InputRange"]) == len(
                 chid_entries
             ), "Number of channel ids does not match input range values."
+ if "Resolution" in self: + ir_entries = re.findall(r"\w+", self["Resolution"]) + if len(ir_entries) == 1: + self["Resolution"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["Resolution"] = [int(e) for e in ir_entries] + if "IntensityThreshold" in self: + ir_entries = re.findall(r"\w+", self["IntensityThreshold"]) + if len(ir_entries) == 1: + self["IntensityThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["IntensityThreshold"] = [int(e) for e in ir_entries] + if "RedThreshold" in self: + ir_entries = re.findall(r"\w+", self["RedThreshold"]) + if len(ir_entries) == 1: + self["RedThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["RedThreshold"] = [int(e) for e in ir_entries] + if "GreenThreshold" in self: + ir_entries = re.findall(r"\w+", self["GreenThreshold"]) + if len(ir_entries) == 1: + self["GreenThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["GreenThreshold"] = [int(e) for e in ir_entries] + if "BlueThreshold" in self: + ir_entries = re.findall(r"\w+", self["BlueThreshold"]) + if len(ir_entries) == 1: + self["BlueThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["BlueThreshold"] = [int(e) for e in ir_entries] def readTimeDate(self, txt_header): """ From 4133df65de3a4106201a857cbca4e69fcb1322eb Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Thu, 23 May 2024 12:08:17 -0400 Subject: [PATCH 03/24] fix: remove a print statement for debugging purpose --- neo/rawio/neuralynxrawio/nlxheader.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py index d8b43feaa..45caadfbb 100644 --- a/neo/rawio/neuralynxrawio/nlxheader.py +++ b/neo/rawio/neuralynxrawio/nlxheader.py @@ -185,7 +185,6 @@ def read_properties(self, filename, txt_header): :param filename: name of ncs file, used for extracting channel number :param txt_header: header text """ - print(txt_header) # find keys for k1, k2, type_ in NlxHeader.txt_header_keys: pattern = r"-(?P" + k1 + r")\s+(?P.+)" From bb9d891a4f4a0055d8c55209ed4651a5e56d352c Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 00:06:02 -0400 Subject: [PATCH 04/24] feat(NeuralynxRawIO): finish most work for nvt file IO, some bugs still need fixing --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 229 ++++++++++++++++++--- neo/rawio/neuralynxrawio/nlxheader.py | 4 +- 2 files changed, 198 insertions(+), 35 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 00283b458..5028d145d 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -47,8 +47,8 @@ from ..baserawio import ( BaseRawIO, - _signal_channel_dtype, _signal_stream_dtype, + _signal_channel_dtype, _spike_channel_dtype, _event_channel_dtype, ) @@ -131,6 +131,20 @@ class NeuralynxRawIO(BaseRawIO): ("samples", "int16", (NcsSection._RECORD_SIZE)), ] + _nvt_dtype = [ + ("swstx", "uint16"), + ("system_id", "uint16"), + ("data_size", "uint16"), + ("timestamp", "uint64"), + ("bitfield_points", "uint32", (400,)), + ("unused", "int16"), + ("test", "int32", (3,)), + # ("x_location", "int32"), + # ("y_location", "int32"), + # ("head_angle", "int32"), + ("colored_tgts", "int32", (50,)), + ] + def __init__( self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, **kargs ): @@ -156,6 +170,12 @@ def _source_name(self): return self.dirname def _parse_header(self): + _ncs_sample_dtype = 
[dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] + # _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] + _nvt_sample_dtype = "int32" + + ncs_unit = "#packet" + nvt_unit = "#frame" stream_channels = [] signal_channels = [] @@ -165,16 +185,20 @@ def _parse_header(self): self.ncs_filenames = OrderedDict() # (chan_name, chan_id): filename self.nse_ntt_filenames = OrderedDict() # (chan_name, chan_id): filename self.nev_filenames = OrderedDict() # chan_id: filename + self.nvt_filenames = OrderedDict() # chan_id: filename self.file_headers = OrderedDict() # filename: file header dict self._nev_memmap = {} self._spike_memmap = {} + self._nvt_memmaps = [] self.internal_unit_ids = [] # channel_index > ((channel_name, channel_id), unit_id) self.internal_event_ids = [] + self.tracker_system_ids = [] self._empty_ncs = [] # this list contains filenames of empty files self._empty_nev = [] self._empty_nse_ntt = [] + self._empty_nvt = [] # Explore the directory looking for ncs, nev, nse and ntt # and construct channels headers. @@ -218,8 +242,11 @@ def _parse_header(self): # Skip Ncs files with only header. Other empty file types # will have an empty dataset constructed later. - if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs"]: - self._empty_ncs.append(filename) + if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs", "nvt"]: + if ext == "ncs": + self._empty_ncs.append(filename) + elif ext == "nvt": + self._empty_nvt.append(filename) continue # All file have more or less the same header structure @@ -253,7 +280,7 @@ def _parse_header(self): gain *= -1 offset = 0.0 signal_channels.append( - (chan_name, str(chan_id), info["sampling_rate"], "int16", units, gain, offset, stream_id) + (chan_name, str(chan_id), info["sampling_rate"], _ncs_sample_dtype, units, gain, offset, stream_id) ) self.ncs_filenames[chan_uid] = filename keys = [ @@ -340,6 +367,50 @@ def _parse_header(self): self._nev_memmap[chan_id] = data + # nvt files are passed as signals bundled into signal streams + elif ext == "nvt": + file_mmap = self._get_file_map(filename) + + system_id = np.unique(file_mmap["system_id"]).item() + chan_uid = (chan_name, str(system_id)) + self.nvt_filenames[chan_uid] = filename + + n_frames = copy.copy(file_mmap.shape[0]) + if n_frames: + t_start = copy.copy(file_mmap[0][3]) + if system_id not in self.tracker_system_ids: + self.tracker_system_ids.append(system_id) + else: + t_start = 0 + + stream_prop = (info["sampling_rate"], n_frames, t_start) + + if stream_prop not in stream_props: + stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} + else: + stream_props[stream_prop]["filenames"].append(filename) + stream_id = stream_props[stream_prop]["stream_id"] + + units = "dimensionless" + gain = 1.0 + offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset + signal_channels.append( + (chan_name, str(system_id), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id) + ) + + # NOTE: only loading the selected features here. 
"bitfield_points" and "colored_tgts" are not loaded due to their dimensionality + # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] + self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", "system_id", "test"]]}) + # print(self._nvt_memmap[chan_id][:5]) + + keys = [ + "recording_opened", + "VideoFormat", + "Resolution", + ] + d = {k: info[k] for k in keys if k in info} + signal_annotations.append(d) + signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype) spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype) event_channels = np.array(event_channels, dtype=_event_channel_dtype) @@ -347,7 +418,10 @@ def _parse_header(self): if signal_channels.size > 0: # ordering streams according from high to low sampling rates stream_props = {k: stream_props[k] for k in sorted(stream_props, reverse=True)} - names = [f"Stream (rate,#packet,t0): {sp}" for sp in stream_props] + # assign different names to ncs stream and nvt stream + names = [f"Stream (rate,{ncs_unit},t0): {sp}" if pathlib.Path(stream_props[sp]["filenames"][0]).suffix.lower()[1:] == "ncs" else + f"Stream (rate,{nvt_unit},t0): {sp}" + for sp in stream_props] ids = [stream_prop["stream_id"] for stream_prop in stream_props.values()] signal_streams = list(zip(names, ids)) else: @@ -359,26 +433,39 @@ def _parse_header(self): self._timestamp_limits = None self._nb_segment = 1 - stream_infos = {} + ncs_stream_infos = {} + nvt_stream_infos = {} # Read ncs files of each stream for gap detection and nb_segment computation. + # Since nvt files are passed as signals, we need to filter them out. for stream_id in np.unique(signal_channels["stream_id"]): stream_channels = signal_channels[signal_channels["stream_id"] == stream_id] stream_chan_uids = zip(stream_channels["name"], stream_channels["id"]) - stream_filenames = [self.ncs_filenames[chuid] for chuid in stream_chan_uids] - _sigs_memmaps, ncsSegTimestampLimits, section_structure = self.scan_stream_ncs_files(stream_filenames) - - stream_infos[stream_id] = { - "segment_sig_memmaps": _sigs_memmaps, - "ncs_segment_infos": ncsSegTimestampLimits, - "section_structure": section_structure, - } + # ncs files have dtype int16 while nvt files have dtype int32, so we use this to filter out nvt files + if (stream_channels["dtype"] == _ncs_sample_dtype).all(): + stream_filenames = [self.ncs_filenames[chuid] for chuid in stream_chan_uids] + _sigs_memmaps, ncsSegTimestampLimits, section_structure = self.scan_stream_ncs_files(stream_filenames) + + ncs_stream_infos[stream_id] = { + "segment_sig_memmaps": _sigs_memmaps, + "ncs_segment_infos": ncsSegTimestampLimits, + "section_structure": section_structure, + } + + else: + stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] + + nvt_stream_infos[stream_id] = { + "segment_sig_memmaps": self._nvt_memmaps, + "nvt_segment_infos": self.generate_nvt_seg_infos(), + "section_structure": None, + } # check if section structure across streams is compatible and merge infos ref_sec_structure = None - for stream_id, stream_info in stream_infos.items(): - ref_stream_id = list(stream_infos.keys())[0] - ref_sec_structure = stream_infos[ref_stream_id]["section_structure"] + for stream_id, stream_info in ncs_stream_infos.items(): + ref_stream_id = list(ncs_stream_infos.keys())[0] + ref_sec_structure = ncs_stream_infos[ref_stream_id]["section_structure"] sec_structure = stream_info["section_structure"] @@ -395,8 +482,11 @@ def _parse_header(self): 
f"{stream_id}:{chan_ids}." ) + nvt_segments = 0 + if nvt_stream_infos: + nvt_segments += sum(nvt_stream_info["nvt_segment_infos"].nb_segment for nvt_stream_info in nvt_stream_infos.values()) if ref_sec_structure is not None: - self._nb_segment = len(ref_sec_structure.sects) + self._nb_segment = len(ref_sec_structure.sects) + nvt_segments else: # Use only a single segment if no ncs data is present self._nb_segment = 1 @@ -407,13 +497,13 @@ def min_max_tuple(tuple1, tuple2): result = (min(m for m in mins if m is not None), max(m for m in maxs if m is not None)) return result - # merge stream mmemmaps since streams are compatible + # merge stream memmaps since streams are compatible self._sigs_memmaps = [{} for seg_idx in range(self._nb_segment)] # time limits of integer timestamps in ncs files self._timestamp_limits = [(None, None) for seg_idx in range(self._nb_segment)] # time limits physical times in ncs files self._signal_limits = [(None, None) for seg_idx in range(self._nb_segment)] - for stream_id, stream_info in stream_infos.items(): + for stream_id, stream_info in ncs_stream_infos.items(): stream_mmaps = stream_info["segment_sig_memmaps"] for seg_idx, signal_dict in enumerate(stream_mmaps): self._sigs_memmaps[seg_idx].update(signal_dict) @@ -426,16 +516,32 @@ def min_max_tuple(tuple1, tuple2): t_start = ncs_segment_info.t_start[seg_idx] t_stop = ncs_segment_info.t_stop[seg_idx] self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) + + seg_offset = len(ref_sec_structure.sects) + for stream_id, stream_info in nvt_stream_infos.items(): + stream_mmaps = stream_info["segment_sig_memmaps"] + for seg_idx, signal_dict in enumerate(stream_mmaps): + self._sigs_memmaps[seg_idx + seg_offset].update(signal_dict) + + nvt_segment_info = stream_info["nvt_segment_infos"] + for seg_idx, (t_start, t_stop) in enumerate(nvt_segment_info.timestamp_limits): + self._timestamp_limits[seg_idx + seg_offset] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) + + for seg_idx in range(nvt_segment_info.nb_segment): + t_start = nvt_segment_info.t_start[seg_idx] + t_stop = nvt_segment_info.t_stop[seg_idx] + self._signal_limits[seg_idx + seg_offset] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) # precompute signal lengths within segments self._sigs_length = [] if self._sigs_memmaps: for seg_idx, sig_container in enumerate(self._sigs_memmaps): self._sigs_length.append({}) + print(seg_idx, type(sig_container)) for chan_uid, sig_infos in sig_container.items(): self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) - # Determine timestamp limits in nev, nse, ntt files by scanning them. + # Determine timestamp limits in nse, ntt, nev files by scanning them. 
ts0, ts1 = None, None for _data_memmap in (self._spike_memmap, self._nev_memmap): for _, data in _data_memmap.items(): @@ -472,7 +578,7 @@ def min_max_tuple(tuple1, tuple2): self._seg_t_stops[-1] = self.global_t_stop else: - # case HAVE ncs but NO nev or nse -> + # case HAVE ncs but NO nev or nse -> self._seg_t_starts = [limits[0] for limits in self._signal_limits] self._seg_t_stops = [limits[1] for limits in self._signal_limits] self.global_t_start = self._signal_limits[0][0] @@ -495,23 +601,48 @@ def min_max_tuple(tuple1, tuple2): self._generate_minimal_annotations() bl_annotations = self.raw_annotations["blocks"][0] + # generate key sets for ncs and nvt annotations + key_sets = np.unique([d.keys() for d in signal_annotations]) + nvt_key_set = {} + ncs_key_set = {} + for key_set in key_sets: + if "Resolution" in key_set: + nvt_key_set = key_set + else: + ncs_key_set = key_set + for seg_index in range(self._nb_segment): seg_annotations = bl_annotations["segments"][seg_index] for stream_id in range(signal_streams.size): # one or no signal stream stream_ann = seg_annotations["signals"][stream_id] - # handle array annotations - for key in signal_annotations[0].keys(): - values = [] - # only collect values from channels belonging to current stream - for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: - value = signal_annotations[d][key] - values.append(value) - values = np.array(values) - if values.ndim == 1: - # 'InputRange': is 2D and make bugs - stream_ann["__array_annotations__"][key] = values + + if ncs_unit in stream_ann["name"]: + # handle array annotations + for key in ncs_key_set: + values = [] + # only collect values from channels belonging to current stream + for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: + value = signal_annotations[d][key] + values.append(value) + values = np.array(values) + if values.ndim == 1: + # 'InputRange': is 2D and make bugs + stream_ann["__array_annotations__"][key] = values + + elif nvt_unit in stream_ann["name"]: + for key in nvt_key_set: + values = [] + for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: + value = signal_annotations[d][key] + values.append(value) + values = np.array(values) + if values.ndim == 1: + stream_ann["__array_annotations__"][key] = values + + else: + continue for c in range(spike_channels.size): unit_ann = seg_annotations["spikes"][c] @@ -554,6 +685,9 @@ def _get_file_map(filename): elif suffix == "nev": return np.memmap(filename, dtype=nev_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) + + elif suffix == "nvt": + return np.memmap(filename, dtype=NeuralynxRawIO._nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) else: raise ValueError(f"Unknown file suffix {suffix}") @@ -572,6 +706,7 @@ def _get_signal_size(self, block_index, seg_index, stream_index): if len(signals): sig = signals[0] + print("test", self._sigs_length[seg_index]) return self._sigs_length[seg_index][(sig["name"], sig["id"])] else: raise ValueError( @@ -861,6 +996,34 @@ def scan_stream_ncs_files(self, ncs_filenames): stream_section_structure = list(revSectMap.keys())[0] return memmaps, seg_time_limits, stream_section_structure + + def generate_nvt_seg_infos(self): + """ + TODO: write this later. 
+ """ + # HACK: nb_segments assumed to be 1 for now + seg_time_limits = SegmentTimeLimits( + nb_segment=1, t_start=[], t_stop=[], length=[], timestamp_limits=[] + ) + + ts0, ts1 = None, None + for nvt_memmap in self._nvt_memmaps: + for _, data in nvt_memmap.items(): + seg_time_limits.length.append(data.shape[0]) + ts = data["timestamp"] + + if ts.size == 0: + continue + if ts0 is None: + ts0 = ts[0] + ts1 = ts[-1] + + ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) + seg_time_limits.t_start.append(ts0 / 1e6) + seg_time_limits.t_stop.append(ts1 / 1e6) + seg_time_limits.timestamp_limits.append((ts0, ts1)) + + return seg_time_limits # time limits for set of segments diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py index 45caadfbb..38758a94c 100644 --- a/neo/rawio/neuralynxrawio/nlxheader.py +++ b/neo/rawio/neuralynxrawio/nlxheader.py @@ -78,8 +78,8 @@ def _to_bool(txt): ("Sharpness", "", int), ("DirectionOffset", "", int), ("Resolution", "", None), - ("CameraDelay", "", int), - ("EnableFieldEstimation", "field_estimation_enabled", _to_bool), + ("CameraDelay", "", float), + ("EnableFieldEstimation", "", _to_bool), ("TargetDist", "", None), ] From 8aeb5beae818e5c683be91823fe12d037de1a462 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 16:51:22 -0400 Subject: [PATCH 05/24] feat(NeuralynxRawIO): add nvt support Only loading dnextracted_x, dnextracted_y and dnextracted_angle as 3 dimensionless signal channels. Only tested on directories with single nvt file. --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 213 ++++++++++++--------- 1 file changed, 121 insertions(+), 92 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 5028d145d..fb58a83b3 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -54,6 +54,7 @@ ) from operator import itemgetter import numpy as np +import pdb import os import pathlib import copy @@ -138,10 +139,9 @@ class NeuralynxRawIO(BaseRawIO): ("timestamp", "uint64"), ("bitfield_points", "uint32", (400,)), ("unused", "int16"), - ("test", "int32", (3,)), - # ("x_location", "int32"), - # ("y_location", "int32"), - # ("head_angle", "int32"), + ("x_location", "int32"), + ("y_location", "int32"), + ("head_angle", "int32"), ("colored_tgts", "int32", (50,)), ] @@ -171,11 +171,7 @@ def _source_name(self): def _parse_header(self): _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] - # _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] - _nvt_sample_dtype = "int32" - - ncs_unit = "#packet" - nvt_unit = "#frame" + _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] stream_channels = [] signal_channels = [] @@ -369,47 +365,47 @@ def _parse_header(self): # nvt files are passed as signals bundled into signal streams elif ext == "nvt": - file_mmap = self._get_file_map(filename) - system_id = np.unique(file_mmap["system_id"]).item() - chan_uid = (chan_name, str(system_id)) - self.nvt_filenames[chan_uid] = filename + units = "dimensionless" + gain = 1.0 + offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset - n_frames = copy.copy(file_mmap.shape[0]) - if n_frames: - t_start = copy.copy(file_mmap[0][3]) - if system_id not in self.tracker_system_ids: - self.tracker_system_ids.append(system_id) - else: - t_start = 0 + # TODO: to support multiple files, we 
need to adjust range since i must be unique + for i in range(len(nvt_selected_features)): + file_mmap = self._get_file_map(filename) - stream_prop = (info["sampling_rate"], n_frames, t_start) + chan_uid = (chan_name, str(i)) + self.nvt_filenames[chan_uid] = filename - if stream_prop not in stream_props: - stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} - else: - stream_props[stream_prop]["filenames"].append(filename) - stream_id = stream_props[stream_prop]["stream_id"] + n_frames = copy.copy(file_mmap.shape[0]) + if n_frames: + t_start = copy.copy(file_mmap[0][3]) + if i not in self.tracker_system_ids: + self.tracker_system_ids.append(i) + else: + t_start = 0 - units = "dimensionless" - gain = 1.0 - offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset - signal_channels.append( - (chan_name, str(system_id), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id) - ) + stream_prop = (info["sampling_rate"], n_frames, t_start) - # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality - # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] - self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", "system_id", "test"]]}) - # print(self._nvt_memmap[chan_id][:5]) + if stream_prop not in stream_props: + stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} + else: + stream_props[stream_prop]["filenames"].append(filename) + stream_id = stream_props[stream_prop]["stream_id"] - keys = [ - "recording_opened", - "VideoFormat", - "Resolution", - ] - d = {k: info[k] for k in keys if k in info} - signal_annotations.append(d) + signal_channels.append((chan_name, str(i), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id)) + + # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality + # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] + self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]}) + + keys = [ + # "recording_opened", + # "VideoFormat", + # "Resolution", + ] + d = {k: info[k] for k in keys if k in info} + signal_annotations.append(d) signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype) spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype) @@ -451,13 +447,16 @@ def _parse_header(self): "ncs_segment_infos": ncsSegTimestampLimits, "section_structure": section_structure, } + # list with 1 element (a single dict), keys are chan_uids, values are memmaps + # print(ncs_stream_infos[stream_id]["segment_sig_memmaps"]) else: stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] + nvt_memmaps, time_infos = self.generate_nvt_seg_infos() nvt_stream_infos[stream_id] = { - "segment_sig_memmaps": self._nvt_memmaps, - "nvt_segment_infos": self.generate_nvt_seg_infos(), + "segment_sig_memmaps": nvt_memmaps, + "nvt_segment_infos": time_infos, "section_structure": None, } @@ -482,11 +481,8 @@ def _parse_header(self): f"{stream_id}:{chan_ids}." 
) - nvt_segments = 0 - if nvt_stream_infos: - nvt_segments += sum(nvt_stream_info["nvt_segment_infos"].nb_segment for nvt_stream_info in nvt_stream_infos.values()) if ref_sec_structure is not None: - self._nb_segment = len(ref_sec_structure.sects) + nvt_segments + self._nb_segment = len(ref_sec_structure.sects) else: # Use only a single segment if no ncs data is present self._nb_segment = 1 @@ -517,29 +513,30 @@ def min_max_tuple(tuple1, tuple2): t_stop = ncs_segment_info.t_stop[seg_idx] self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) - seg_offset = len(ref_sec_structure.sects) for stream_id, stream_info in nvt_stream_infos.items(): stream_mmaps = stream_info["segment_sig_memmaps"] for seg_idx, signal_dict in enumerate(stream_mmaps): - self._sigs_memmaps[seg_idx + seg_offset].update(signal_dict) + self._sigs_memmaps[seg_idx].update(signal_dict) nvt_segment_info = stream_info["nvt_segment_infos"] for seg_idx, (t_start, t_stop) in enumerate(nvt_segment_info.timestamp_limits): - self._timestamp_limits[seg_idx + seg_offset] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) + self._timestamp_limits[seg_idx] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) for seg_idx in range(nvt_segment_info.nb_segment): t_start = nvt_segment_info.t_start[seg_idx] t_stop = nvt_segment_info.t_stop[seg_idx] - self._signal_limits[seg_idx + seg_offset] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) + self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) # precompute signal lengths within segments self._sigs_length = [] if self._sigs_memmaps: for seg_idx, sig_container in enumerate(self._sigs_memmaps): self._sigs_length.append({}) - print(seg_idx, type(sig_container)) for chan_uid, sig_infos in sig_container.items(): - self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) + if sig_infos[0].dtype == NeuralynxRawIO._ncs_dtype: + self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) + else: + self._sigs_length[seg_idx][chan_uid] = sig_infos.shape[0] # Determine timestamp limits in nse, ntt, nev files by scanning them. 
ts0, ts1 = None, None @@ -638,8 +635,7 @@ def min_max_tuple(tuple1, tuple2): value = signal_annotations[d][key] values.append(value) values = np.array(values) - if values.ndim == 1: - stream_ann["__array_annotations__"][key] = values + stream_ann["__array_annotations__"][key] = values else: continue @@ -706,7 +702,6 @@ def _get_signal_size(self, block_index, seg_index, stream_index): if len(signals): sig = signals[0] - print("test", self._sigs_length[seg_index]) return self._sigs_length[seg_index][(sig["name"], sig["id"])] else: raise ValueError( @@ -720,7 +715,6 @@ def _get_signal_t_start(self, block_index, seg_index, stream_index): # use first channel of stream as all channels in stream have a common t_start channel = self.header["signal_channels"][stream_mask][0] - data = self._sigs_memmaps[seg_index][(channel["name"], channel["id"])] absolute_t_start = data["timestamp"][0] @@ -747,34 +741,55 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea ------- array of samples, with each requested channel in a column """ - if i_start is None: - i_start = 0 - if i_stop is None: - i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) + if ncs_unit in str(self.header["signal_streams"][stream_index]): + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) - block_start = i_start // NcsSection._RECORD_SIZE - block_stop = i_stop // NcsSection._RECORD_SIZE + 1 - sl0 = i_start % 512 - sl1 = sl0 + (i_stop - i_start) + block_start = i_start // NcsSection._RECORD_SIZE + block_stop = i_stop // NcsSection._RECORD_SIZE + 1 + sl0 = i_start % 512 + sl1 = sl0 + (i_stop - i_start) - if channel_indexes is None: - channel_indexes = slice(None) + if channel_indexes is None: + channel_indexes = slice(None) - stream_id = self.header["signal_streams"][stream_index]["id"] - stream_mask = self.header["signal_channels"]["stream_id"] == stream_id + stream_id = self.header["signal_streams"][stream_index]["id"] + stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"] - channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"] + channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"] + channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"] - # create buffer for samples - sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype="int16") + # create buffer for samples + sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype="int16") + + for i, chan_uid in enumerate(zip(channel_names, channel_ids)): + data = self._sigs_memmaps[seg_index][chan_uid] + sub = data[block_start:block_stop] + sigs_chunk[:, i] = sub["samples"].flatten()[sl0:sl1] + return sigs_chunk + + else: + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) - for i, chan_uid in enumerate(zip(channel_names, channel_ids)): - data = self._sigs_memmaps[seg_index][chan_uid] - sub = data[block_start:block_stop] - sigs_chunk[:, i] = sub["samples"].flatten()[sl0:sl1] + stream_id = self.header["signal_streams"][stream_index]["id"] + stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - return sigs_chunk + # HACK: for some reason channel_ids and channel_names have an 
extra dimension, adding [0] fixes it temporarily + channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"][0] + channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"][0] + + sig_chunk = np.zeros((i_stop - i_start, len(nvt_selected_features)), dtype="int32") + + for i, chan_uid in enumerate(zip(channel_names, channel_ids)): + data = self._sigs_memmaps[seg_index][chan_uid] + sig_chunk[:, i] = data[nvt_selected_features[i]] + + return sig_chunk def _spike_count(self, block_index, seg_index, unit_index): chan_uid, unit_id = self.internal_unit_ids[unit_index] @@ -999,32 +1014,46 @@ def scan_stream_ncs_files(self, ncs_filenames): def generate_nvt_seg_infos(self): """ + NOTE: this will not work on multiple nvt files, this is just a temporary solution + maybe add a check for multiple nvt files and raise an error if there are more than one TODO: write this later. """ - # HACK: nb_segments assumed to be 1 for now + # HACK: nb_segments assumed to be 1, it really doesn't matter for now seg_time_limits = SegmentTimeLimits( nb_segment=1, t_start=[], t_stop=[], length=[], timestamp_limits=[] ) + memmaps ={} + prev_chan_name = None ts0, ts1 = None, None for nvt_memmap in self._nvt_memmaps: - for _, data in nvt_memmap.items(): - seg_time_limits.length.append(data.shape[0]) - ts = data["timestamp"] + for key, data in nvt_memmap.items(): + chan_name = key[0] + if chan_name != prev_chan_name: + prev_chan_name = chan_name + seg_time_limits.length.append(data.shape[0]) + ts = data["timestamp"] - if ts.size == 0: - continue - if ts0 is None: ts0 = ts[0] ts1 = ts[-1] - ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) - seg_time_limits.t_start.append(ts0 / 1e6) - seg_time_limits.t_stop.append(ts1 / 1e6) - seg_time_limits.timestamp_limits.append((ts0, ts1)) + ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) + seg_time_limits.t_start.append(ts0 / 1e6) + seg_time_limits.t_stop.append(ts1 / 1e6) + seg_time_limits.timestamp_limits.append((ts0, ts1)) + + if chan_name not in memmaps: + memmaps[chan_name] = {} + memmaps[chan_name][key] = data + + + memmaps = list(memmaps.values()) - return seg_time_limits + return memmaps, seg_time_limits +ncs_unit = "#packet" +nvt_unit = "#frame" +nvt_selected_features = ["x_location", "y_location", "head_angle"] # time limits for set of segments SegmentTimeLimits = namedtuple("SegmentTimeLimits", ["nb_segment", "t_start", "t_stop", "length", "timestamp_limits"]) From 1661a10a1dc002a44574f58e35b02358d77d90f6 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 18:07:35 -0400 Subject: [PATCH 06/24] feat(NeuralynxRawIO): add Resolution to AnalogSignal.array_annotations as literal string of a list --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index fb58a83b3..4f8441974 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -54,7 +54,6 @@ ) from operator import itemgetter import numpy as np -import pdb import os import pathlib import copy @@ -173,6 +172,8 @@ def _parse_header(self): _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] + nvt_counter = 0 + stream_channels = [] signal_channels = [] spike_channels = [] @@ -363,8 +364,11 @@ def _parse_header(self): 
self._nev_memmap[chan_id] = data - # nvt files are passed as signals bundled into signal streams + # nvt file is passed as signals bundled into a signal stream separate from the ncs stream elif ext == "nvt": + nvt_counter += 1 + if nvt_counter > 1: + raise ValueError("Reading multiple nvt files in one session are not yet supported.") units = "dimensionless" gain = 1.0 @@ -399,10 +403,11 @@ def _parse_header(self): # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]}) + info["Resolution"] = str(info["Resolution"]) keys = [ - # "recording_opened", - # "VideoFormat", - # "Resolution", + "recording_opened", + "VideoFormat", + "Resolution", ] d = {k: info[k] for k in keys if k in info} signal_annotations.append(d) @@ -635,7 +640,8 @@ def min_max_tuple(tuple1, tuple2): value = signal_annotations[d][key] values.append(value) values = np.array(values) - stream_ann["__array_annotations__"][key] = values + if values.ndim == 1: + stream_ann["__array_annotations__"][key] = values else: continue From e912625a076c227917a71b56a042511e99b44d0e Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 18:09:01 -0400 Subject: [PATCH 07/24] chore: update gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 0f023f9fb..15c9cccfb 100644 --- a/.gitignore +++ b/.gitignore @@ -80,4 +80,5 @@ doc/*.nwb *.plx *.smr B95.zip -grouped_ephys \ No newline at end of file +grouped_ephys +test.py From 9df446e0a3148e655f34c5d0d41ea17c9a4a454b Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 18:14:33 -0400 Subject: [PATCH 08/24] chore: update .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 15c9cccfb..1d450b96b 100644 --- a/.gitignore +++ b/.gitignore @@ -81,4 +81,4 @@ doc/*.nwb *.smr B95.zip grouped_ephys -test.py +nvt_test.py From 7f4c8a3109809b4f93c0a379185230a2909fef43 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 21:23:23 -0400 Subject: [PATCH 09/24] chore(NeuralynxIO): add code documentation and option to ignore nvt files for NeuralynxIO and NeuralynxRawIO --- neo/io/neuralynxio.py | 10 ++- neo/rawio/neuralynxrawio/neuralynxrawio.py | 86 +++++++++++++--------- 2 files changed, 60 insertions(+), 36 deletions(-) diff --git a/neo/io/neuralynxio.py b/neo/io/neuralynxio.py index ed5cd0217..03139dad5 100644 --- a/neo/io/neuralynxio.py +++ b/neo/io/neuralynxio.py @@ -16,12 +16,13 @@ class NeuralynxIO(NeuralynxRawIO, BaseFromRaw): """ Class for reading data from Neuralynx files. - This IO supports NCS, NEV, NSE and NTT file formats. + This IO supports NCS, NEV, NSE, NTT and NVT file formats. NCS contains signals for one channel NEV contains events NSE contains spikes and waveforms for mono electrodes NTT contains spikes and waveforms for tetrodes + NVT contains coordinates and head angles for video tracking """ _prefered_signal_group_mode = "group-by-same-units" @@ -35,6 +36,7 @@ def __init__( cache_path="same_as_resource", exclude_filename=None, keep_original_times=False, + ignore_nvt=False, ): """ Initialise IO instance @@ -59,6 +61,11 @@ def __init__( Preserve original time stamps as in data files. By default datasets are shifted to begin at t_start = 0*pq.second. Default: False + ignore_nvt : bool + Ignore NVT files when loading data. 
This is only a temporary argument before
+            support for multiple NVT files is added. Turn it on if there are multiple NVT
+            files in the directory.
+            Default: False
         """
         NeuralynxRawIO.__init__(
             self,
@@ -68,6 +75,7 @@ def __init__(
             cache_path=cache_path,
             exclude_filename=exclude_filename,
             keep_original_times=keep_original_times,
+            ignore_nvt=ignore_nvt,
         )
         if self.rawmode == "one-file":
             BaseFromRaw.__init__(self, filename)
diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py
index 4f8441974..1e0b89731 100644
--- a/neo/rawio/neuralynxrawio/neuralynxrawio.py
+++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py
@@ -1,12 +1,12 @@
 """
 Class for reading data from Neuralynx files.
-This IO supports NCS, NEV, NSE and NTT file formats.
-
+This IO supports NCS, NEV, NSE, NTT and NVT file formats.
 NCS contains the sampled signal for one channel
 NEV contains events
 NSE contains spikes and waveforms for mono electrodes
 NTT contains spikes and waveforms for tetrodes
+NVT contains coordinates and head angles for video tracking

 All Neuralynx files contain a 16 kilobyte text header followed by 0 or more fixed length records.
 The format of the header has never been formally specified, however, the Neuralynx programs which
@@ -38,6 +38,17 @@
 outside of Segments defined by .Ncs files will be ignored. To access all time point data in a
 single Segment load a session excluding .Ncs files.

+This RawIO only partially supports the NVT file format; limitations include:
+    * Only loads the dnextracted_x, dnextracted_y and dnextracted_angle data fields from NVT files.
+      Other fields that could be potentially useful (dwPoints and dntargets) are not yet supported
+      due to their format.
+    * Only a single NVT file can be loaded per session.
+    * The NVT file is assumed to be in the same segment (sharing a common clock (time basis)) as the
+      NCS files.
+
+The x and y pixel coordinates and animal head angle from the nvt file are treated as dimensionless
+analog signals bundled into a signal stream separate from the NCS stream.
+
 Continuous data streams are ordered by descending sampling rate.

 This RawIO presents only a single Block.
@@ -91,10 +102,15 @@ class NeuralynxRawIO(BaseRawIO):
           consecutive data packet is more than one sample interval.
         * strict_gap_mode = False then a gap has an increased tolerance. Some new system with
           different clock need this option otherwise, too many gaps are detected
+    ignore_nvt : bool
+        Ignore NVT files when loading data. This is only a temporary argument before
+        support for multiple NVT files is added. Turn it on if there are multiple NVT
+        files in the directory.
+        Default: False

     Notes
     -----
-    * This IO supports NCS, NEV, NSE and NTT file formats (not NVT or NRD yet)
+    * This IO supports NCS, NEV, NSE and NTT file formats (not NRD yet)

     * These variations of header format and possible differences between the stated sampling
     frequency and actual sampling frequency can create apparent time discrepancies in .Ncs
     files. Additionally,
@@ -120,7 +136,7 @@
     Display all information about signal channels, units, segment size....
""" - extensions = ["nse", "ncs", "nev", "ntt", "nvt", "nrd"] # nvt and nrd are not yet supported + extensions = ["nse", "ncs", "nev", "ntt", "nvt", "nrd"] # nrd is not yet supported rawmode = "one-dir" _ncs_dtype = [ @@ -131,21 +147,9 @@ class NeuralynxRawIO(BaseRawIO): ("samples", "int16", (NcsSection._RECORD_SIZE)), ] - _nvt_dtype = [ - ("swstx", "uint16"), - ("system_id", "uint16"), - ("data_size", "uint16"), - ("timestamp", "uint64"), - ("bitfield_points", "uint32", (400,)), - ("unused", "int16"), - ("x_location", "int32"), - ("y_location", "int32"), - ("head_angle", "int32"), - ("colored_tgts", "int32", (50,)), - ] def __init__( - self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, **kargs + self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, ignore_nvt=False, **kargs ): if dirname != "": @@ -160,6 +164,7 @@ def __init__( self.keep_original_times = keep_original_times self.strict_gap_mode = strict_gap_mode self.exclude_filename = exclude_filename + self.ignore_nvt = ignore_nvt BaseRawIO.__init__(self, **kargs) def _source_name(self): @@ -170,7 +175,7 @@ def _source_name(self): def _parse_header(self): _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] - _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] + _nvt_sample_dtype = [dtype[1] for dtype in _nvt_dtype if dtype[0] == "x_location"][0] nvt_counter = 0 @@ -182,7 +187,7 @@ def _parse_header(self): self.ncs_filenames = OrderedDict() # (chan_name, chan_id): filename self.nse_ntt_filenames = OrderedDict() # (chan_name, chan_id): filename self.nev_filenames = OrderedDict() # chan_id: filename - self.nvt_filenames = OrderedDict() # chan_id: filename + self.nvt_filenames = OrderedDict() self.file_headers = OrderedDict() # filename: file header dict @@ -197,7 +202,7 @@ def _parse_header(self): self._empty_nse_ntt = [] self._empty_nvt = [] - # Explore the directory looking for ncs, nev, nse and ntt + # Explore the directory looking for ncs, nev, nse, ntt and nvt files # and construct channels headers. signal_annotations = [] unit_annotations = [] @@ -237,7 +242,7 @@ def _parse_header(self): if ext not in self.extensions: continue - # Skip Ncs files with only header. Other empty file types + # Skip Ncs and nvt files with only header. Other empty file types # will have an empty dataset constructed later. if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs", "nvt"]: if ext == "ncs": @@ -365,16 +370,20 @@ def _parse_header(self): self._nev_memmap[chan_id] = data # nvt file is passed as signals bundled into a signal stream separate from the ncs stream - elif ext == "nvt": + elif ext == "nvt" and not self.ignore_nvt: nvt_counter += 1 if nvt_counter > 1: - raise ValueError("Reading multiple nvt files in one session are not yet supported.") + raise ValueError(""" + Reading multiple nvt files in one session are not yet supported. + Try loading each nvt files separately or set ignore_nvt=True. 
+ """) units = "dimensionless" gain = 1.0 offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset - # TODO: to support multiple files, we need to adjust range since i must be unique + # treating each feature as a separate channel to emulate ncs channels + # TODO: to support multiple files, we need to adjust range since i must be unique i's for i in range(len(nvt_selected_features)): file_mmap = self._get_file_map(filename) @@ -400,7 +409,6 @@ def _parse_header(self): signal_channels.append((chan_name, str(i), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id)) # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality - # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]}) info["Resolution"] = str(info["Resolution"]) @@ -438,7 +446,7 @@ def _parse_header(self): nvt_stream_infos = {} # Read ncs files of each stream for gap detection and nb_segment computation. - # Since nvt files are passed as signals, we need to filter them out. + # HACK: filter out the nvt data for now since nvt files are passed as signals, and same segment is assumed. for stream_id in np.unique(signal_channels["stream_id"]): stream_channels = signal_channels[signal_channels["stream_id"] == stream_id] stream_chan_uids = zip(stream_channels["name"], stream_channels["id"]) @@ -452,8 +460,6 @@ def _parse_header(self): "ncs_segment_infos": ncsSegTimestampLimits, "section_structure": section_structure, } - # list with 1 element (a single dict), keys are chan_uids, values are memmaps - # print(ncs_stream_infos[stream_id]["segment_sig_memmaps"]) else: stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] @@ -689,7 +695,7 @@ def _get_file_map(filename): return np.memmap(filename, dtype=nev_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) elif suffix == "nvt": - return np.memmap(filename, dtype=NeuralynxRawIO._nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) + return np.memmap(filename, dtype=_nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) else: raise ValueError(f"Unknown file suffix {suffix}") @@ -785,7 +791,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea stream_id = self.header["signal_streams"][stream_index]["id"] stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - # HACK: for some reason channel_ids and channel_names have an extra dimension, adding [0] fixes it temporarily + # HACK: for some reason channel_ids and channel_names have an extra dimension, adding [0] fixes it channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"][0] channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"][0] @@ -1020,9 +1026,9 @@ def scan_stream_ncs_files(self, ncs_filenames): def generate_nvt_seg_infos(self): """ - NOTE: this will not work on multiple nvt files, this is just a temporary solution - maybe add a check for multiple nvt files and raise an error if there are more than one - TODO: write this later. + Since NVT files are processed in a similar way to NCS files, this RawIO pass them in similar + data structures internally. this function simply emulates the scan_stream_ncs_files function + for NVT files so that the data can be processed in the same way. 
""" # HACK: nb_segments assumed to be 1, it really doesn't matter for now seg_time_limits = SegmentTimeLimits( @@ -1052,9 +1058,7 @@ def generate_nvt_seg_infos(self): memmaps[chan_name] = {} memmaps[chan_name][key] = data - memmaps = list(memmaps.values()) - return memmaps, seg_time_limits ncs_unit = "#packet" @@ -1078,6 +1082,18 @@ def generate_nvt_seg_infos(self): ("event_string", "S128"), ] +_nvt_dtype = [ + ("swstx", "uint16"), + ("system_id", "uint16"), + ("data_size", "uint16"), + ("timestamp", "uint64"), + ("bitfield_points", "uint32", (400,)), + ("unused", "int16"), + ("x_location", "int32"), + ("y_location", "int32"), + ("head_angle", "int32"), + ("colored_tgts", "int32", (50,)), +] def get_nse_or_ntt_dtype(info, ext): """ From 9454ce5613650c62aef45ca141424a73b7d988a8 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Mon, 29 Apr 2024 00:42:39 -0400 Subject: [PATCH 10/24] fix: typos --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 1f6367ee5..00283b458 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -87,8 +87,8 @@ class NeuralynxRawIO(BaseRawIO): Otherwise set 0 of time to first time in dataset strict_gap_mode: bool, default: True Detect gaps using strict mode or not. - * strict_gap_mode = True then a gap is consider when timstamp difference between two - consequtive data packet is more than one sample interval. + * strict_gap_mode = True then a gap is consider when timestamp difference between two + consecutive data packet is more than one sample interval. * strict_gap_mode = False then a gap has an increased tolerance. Some new system with different clock need this option otherwise, too many gaps are detected From 187168c108e7f8283ef55420545d6861d861721b Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Thu, 23 May 2024 11:58:55 -0400 Subject: [PATCH 11/24] fix: correct regex in NlxHeader, previous regex doesn't register multiple values feat: add header support for Neuralynx nvt files --- neo/rawio/neuralynxrawio/nlxheader.py | 50 +++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py index 8d1de1ca2..d8b43feaa 100644 --- a/neo/rawio/neuralynxrawio/nlxheader.py +++ b/neo/rawio/neuralynxrawio/nlxheader.py @@ -66,6 +66,21 @@ def _to_bool(txt): ("AcquisitionSystem", "", None), ("ReferenceChannel", "", None), ("NLX_Base_Class_Type", "", None), # in version 4 and earlier versions of Cheetah + ("VideoFormat", "", None), + ("IntensityThreshold", "", None), + ("RedThreshold", "", None), + ("GreenThreshold", "", None), + ("BlueThreshold", "", None), + ("Saturation", "", int), + ("Hue", "", int), + ("Brightness", "", int), + ("Contrast", "", int), + ("Sharpness", "", int), + ("DirectionOffset", "", int), + ("Resolution", "", None), + ("CameraDelay", "", int), + ("EnableFieldEstimation", "field_estimation_enabled", _to_bool), + ("TargetDist", "", None), ] # Filename and datetime may appear in header lines starting with # at @@ -170,16 +185,17 @@ def read_properties(self, filename, txt_header): :param filename: name of ncs file, used for extracting channel number :param txt_header: header text """ + print(txt_header) # find keys for k1, k2, type_ in NlxHeader.txt_header_keys: - pattern = r"-(?P" + k1 + r")\s+(?P[\S ]*)" + pattern = r"-(?P" + k1 + r")\s+(?P.+)" matches = 
re.findall(pattern, txt_header) for match in matches: if k2 == "": name = match[0] else: name = k2 - value = match[1].rstrip(" ") + value = match[1].replace("\t", " ").replace("\r", "").rstrip(" ") if type_ is not None: value = type_(value) self[name] = value @@ -243,6 +259,36 @@ def read_properties(self, filename, txt_header): assert len(self["InputRange"]) == len( chid_entries ), "Number of channel ids does not match input range values." + if "Resolution" in self: + ir_entries = re.findall(r"\w+", self["Resolution"]) + if len(ir_entries) == 1: + self["Resolution"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["Resolution"] = [int(e) for e in ir_entries] + if "IntensityThreshold" in self: + ir_entries = re.findall(r"\w+", self["IntensityThreshold"]) + if len(ir_entries) == 1: + self["IntensityThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["IntensityThreshold"] = [int(e) for e in ir_entries] + if "RedThreshold" in self: + ir_entries = re.findall(r"\w+", self["RedThreshold"]) + if len(ir_entries) == 1: + self["RedThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["RedThreshold"] = [int(e) for e in ir_entries] + if "GreenThreshold" in self: + ir_entries = re.findall(r"\w+", self["GreenThreshold"]) + if len(ir_entries) == 1: + self["GreenThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["GreenThreshold"] = [int(e) for e in ir_entries] + if "BlueThreshold" in self: + ir_entries = re.findall(r"\w+", self["BlueThreshold"]) + if len(ir_entries) == 1: + self["BlueThreshold"] = [int(ir_entries[0])] * len(chid_entries) + else: + self["BlueThreshold"] = [int(e) for e in ir_entries] def readTimeDate(self, txt_header): """ From 9337562beee23f9945a35c9ff08c9a259e92013a Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Thu, 23 May 2024 12:08:17 -0400 Subject: [PATCH 12/24] fix: remove a print statement for debugging purpose --- neo/rawio/neuralynxrawio/nlxheader.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py index d8b43feaa..45caadfbb 100644 --- a/neo/rawio/neuralynxrawio/nlxheader.py +++ b/neo/rawio/neuralynxrawio/nlxheader.py @@ -185,7 +185,6 @@ def read_properties(self, filename, txt_header): :param filename: name of ncs file, used for extracting channel number :param txt_header: header text """ - print(txt_header) # find keys for k1, k2, type_ in NlxHeader.txt_header_keys: pattern = r"-(?P" + k1 + r")\s+(?P.+)" From 4733eb0f8b3dd0e499a78fecbcd73a706b5ddf40 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 00:06:02 -0400 Subject: [PATCH 13/24] feat(NeuralynxRawIO): finish most work for nvt file IO, some bugs still need fixing --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 229 ++++++++++++++++++--- neo/rawio/neuralynxrawio/nlxheader.py | 4 +- 2 files changed, 198 insertions(+), 35 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 00283b458..5028d145d 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -47,8 +47,8 @@ from ..baserawio import ( BaseRawIO, - _signal_channel_dtype, _signal_stream_dtype, + _signal_channel_dtype, _spike_channel_dtype, _event_channel_dtype, ) @@ -131,6 +131,20 @@ class NeuralynxRawIO(BaseRawIO): ("samples", "int16", (NcsSection._RECORD_SIZE)), ] + _nvt_dtype = [ + ("swstx", "uint16"), + ("system_id", "uint16"), + ("data_size", "uint16"), + ("timestamp", "uint64"), + ("bitfield_points", 
"uint32", (400,)), + ("unused", "int16"), + ("test", "int32", (3,)), + # ("x_location", "int32"), + # ("y_location", "int32"), + # ("head_angle", "int32"), + ("colored_tgts", "int32", (50,)), + ] + def __init__( self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, **kargs ): @@ -156,6 +170,12 @@ def _source_name(self): return self.dirname def _parse_header(self): + _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] + # _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] + _nvt_sample_dtype = "int32" + + ncs_unit = "#packet" + nvt_unit = "#frame" stream_channels = [] signal_channels = [] @@ -165,16 +185,20 @@ def _parse_header(self): self.ncs_filenames = OrderedDict() # (chan_name, chan_id): filename self.nse_ntt_filenames = OrderedDict() # (chan_name, chan_id): filename self.nev_filenames = OrderedDict() # chan_id: filename + self.nvt_filenames = OrderedDict() # chan_id: filename self.file_headers = OrderedDict() # filename: file header dict self._nev_memmap = {} self._spike_memmap = {} + self._nvt_memmaps = [] self.internal_unit_ids = [] # channel_index > ((channel_name, channel_id), unit_id) self.internal_event_ids = [] + self.tracker_system_ids = [] self._empty_ncs = [] # this list contains filenames of empty files self._empty_nev = [] self._empty_nse_ntt = [] + self._empty_nvt = [] # Explore the directory looking for ncs, nev, nse and ntt # and construct channels headers. @@ -218,8 +242,11 @@ def _parse_header(self): # Skip Ncs files with only header. Other empty file types # will have an empty dataset constructed later. - if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs"]: - self._empty_ncs.append(filename) + if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs", "nvt"]: + if ext == "ncs": + self._empty_ncs.append(filename) + elif ext == "nvt": + self._empty_nvt.append(filename) continue # All file have more or less the same header structure @@ -253,7 +280,7 @@ def _parse_header(self): gain *= -1 offset = 0.0 signal_channels.append( - (chan_name, str(chan_id), info["sampling_rate"], "int16", units, gain, offset, stream_id) + (chan_name, str(chan_id), info["sampling_rate"], _ncs_sample_dtype, units, gain, offset, stream_id) ) self.ncs_filenames[chan_uid] = filename keys = [ @@ -340,6 +367,50 @@ def _parse_header(self): self._nev_memmap[chan_id] = data + # nvt files are passed as signals bundled into signal streams + elif ext == "nvt": + file_mmap = self._get_file_map(filename) + + system_id = np.unique(file_mmap["system_id"]).item() + chan_uid = (chan_name, str(system_id)) + self.nvt_filenames[chan_uid] = filename + + n_frames = copy.copy(file_mmap.shape[0]) + if n_frames: + t_start = copy.copy(file_mmap[0][3]) + if system_id not in self.tracker_system_ids: + self.tracker_system_ids.append(system_id) + else: + t_start = 0 + + stream_prop = (info["sampling_rate"], n_frames, t_start) + + if stream_prop not in stream_props: + stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} + else: + stream_props[stream_prop]["filenames"].append(filename) + stream_id = stream_props[stream_prop]["stream_id"] + + units = "dimensionless" + gain = 1.0 + offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset + signal_channels.append( + (chan_name, str(system_id), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id) + ) + + # NOTE: only 
loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality + # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] + self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", "system_id", "test"]]}) + # print(self._nvt_memmap[chan_id][:5]) + + keys = [ + "recording_opened", + "VideoFormat", + "Resolution", + ] + d = {k: info[k] for k in keys if k in info} + signal_annotations.append(d) + signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype) spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype) event_channels = np.array(event_channels, dtype=_event_channel_dtype) @@ -347,7 +418,10 @@ def _parse_header(self): if signal_channels.size > 0: # ordering streams according from high to low sampling rates stream_props = {k: stream_props[k] for k in sorted(stream_props, reverse=True)} - names = [f"Stream (rate,#packet,t0): {sp}" for sp in stream_props] + # assign different names to ncs stream and nvt stream + names = [f"Stream (rate,{ncs_unit},t0): {sp}" if pathlib.Path(stream_props[sp]["filenames"][0]).suffix.lower()[1:] == "ncs" else + f"Stream (rate,{nvt_unit},t0): {sp}" + for sp in stream_props] ids = [stream_prop["stream_id"] for stream_prop in stream_props.values()] signal_streams = list(zip(names, ids)) else: @@ -359,26 +433,39 @@ def _parse_header(self): self._timestamp_limits = None self._nb_segment = 1 - stream_infos = {} + ncs_stream_infos = {} + nvt_stream_infos = {} # Read ncs files of each stream for gap detection and nb_segment computation. + # Since nvt files are passed as signals, we need to filter them out. for stream_id in np.unique(signal_channels["stream_id"]): stream_channels = signal_channels[signal_channels["stream_id"] == stream_id] stream_chan_uids = zip(stream_channels["name"], stream_channels["id"]) - stream_filenames = [self.ncs_filenames[chuid] for chuid in stream_chan_uids] - _sigs_memmaps, ncsSegTimestampLimits, section_structure = self.scan_stream_ncs_files(stream_filenames) - - stream_infos[stream_id] = { - "segment_sig_memmaps": _sigs_memmaps, - "ncs_segment_infos": ncsSegTimestampLimits, - "section_structure": section_structure, - } + # ncs files have dtype int16 while nvt files have dtype int32, so we use this to filter out nvt files + if (stream_channels["dtype"] == _ncs_sample_dtype).all(): + stream_filenames = [self.ncs_filenames[chuid] for chuid in stream_chan_uids] + _sigs_memmaps, ncsSegTimestampLimits, section_structure = self.scan_stream_ncs_files(stream_filenames) + + ncs_stream_infos[stream_id] = { + "segment_sig_memmaps": _sigs_memmaps, + "ncs_segment_infos": ncsSegTimestampLimits, + "section_structure": section_structure, + } + + else: + stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] + + nvt_stream_infos[stream_id] = { + "segment_sig_memmaps": self._nvt_memmaps, + "nvt_segment_infos": self.generate_nvt_seg_infos(), + "section_structure": None, + } # check if section structure across streams is compatible and merge infos ref_sec_structure = None - for stream_id, stream_info in stream_infos.items(): - ref_stream_id = list(stream_infos.keys())[0] - ref_sec_structure = stream_infos[ref_stream_id]["section_structure"] + for stream_id, stream_info in ncs_stream_infos.items(): + ref_stream_id = list(ncs_stream_infos.keys())[0] + ref_sec_structure = ncs_stream_infos[ref_stream_id]["section_structure"] sec_structure = stream_info["section_structure"] @@ -395,8 +482,11 @@ def 
_parse_header(self): f"{stream_id}:{chan_ids}." ) + nvt_segments = 0 + if nvt_stream_infos: + nvt_segments += sum(nvt_stream_info["nvt_segment_infos"].nb_segment for nvt_stream_info in nvt_stream_infos.values()) if ref_sec_structure is not None: - self._nb_segment = len(ref_sec_structure.sects) + self._nb_segment = len(ref_sec_structure.sects) + nvt_segments else: # Use only a single segment if no ncs data is present self._nb_segment = 1 @@ -407,13 +497,13 @@ def min_max_tuple(tuple1, tuple2): result = (min(m for m in mins if m is not None), max(m for m in maxs if m is not None)) return result - # merge stream mmemmaps since streams are compatible + # merge stream memmaps since streams are compatible self._sigs_memmaps = [{} for seg_idx in range(self._nb_segment)] # time limits of integer timestamps in ncs files self._timestamp_limits = [(None, None) for seg_idx in range(self._nb_segment)] # time limits physical times in ncs files self._signal_limits = [(None, None) for seg_idx in range(self._nb_segment)] - for stream_id, stream_info in stream_infos.items(): + for stream_id, stream_info in ncs_stream_infos.items(): stream_mmaps = stream_info["segment_sig_memmaps"] for seg_idx, signal_dict in enumerate(stream_mmaps): self._sigs_memmaps[seg_idx].update(signal_dict) @@ -426,16 +516,32 @@ def min_max_tuple(tuple1, tuple2): t_start = ncs_segment_info.t_start[seg_idx] t_stop = ncs_segment_info.t_stop[seg_idx] self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) + + seg_offset = len(ref_sec_structure.sects) + for stream_id, stream_info in nvt_stream_infos.items(): + stream_mmaps = stream_info["segment_sig_memmaps"] + for seg_idx, signal_dict in enumerate(stream_mmaps): + self._sigs_memmaps[seg_idx + seg_offset].update(signal_dict) + + nvt_segment_info = stream_info["nvt_segment_infos"] + for seg_idx, (t_start, t_stop) in enumerate(nvt_segment_info.timestamp_limits): + self._timestamp_limits[seg_idx + seg_offset] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) + + for seg_idx in range(nvt_segment_info.nb_segment): + t_start = nvt_segment_info.t_start[seg_idx] + t_stop = nvt_segment_info.t_stop[seg_idx] + self._signal_limits[seg_idx + seg_offset] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) # precompute signal lengths within segments self._sigs_length = [] if self._sigs_memmaps: for seg_idx, sig_container in enumerate(self._sigs_memmaps): self._sigs_length.append({}) + print(seg_idx, type(sig_container)) for chan_uid, sig_infos in sig_container.items(): self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) - # Determine timestamp limits in nev, nse, ntt files by scanning them. + # Determine timestamp limits in nse, ntt, nev files by scanning them. 
ts0, ts1 = None, None for _data_memmap in (self._spike_memmap, self._nev_memmap): for _, data in _data_memmap.items(): @@ -472,7 +578,7 @@ def min_max_tuple(tuple1, tuple2): self._seg_t_stops[-1] = self.global_t_stop else: - # case HAVE ncs but NO nev or nse -> + # case HAVE ncs but NO nev or nse -> self._seg_t_starts = [limits[0] for limits in self._signal_limits] self._seg_t_stops = [limits[1] for limits in self._signal_limits] self.global_t_start = self._signal_limits[0][0] @@ -495,23 +601,48 @@ def min_max_tuple(tuple1, tuple2): self._generate_minimal_annotations() bl_annotations = self.raw_annotations["blocks"][0] + # generate key sets for ncs and nvt annotations + key_sets = np.unique([d.keys() for d in signal_annotations]) + nvt_key_set = {} + ncs_key_set = {} + for key_set in key_sets: + if "Resolution" in key_set: + nvt_key_set = key_set + else: + ncs_key_set = key_set + for seg_index in range(self._nb_segment): seg_annotations = bl_annotations["segments"][seg_index] for stream_id in range(signal_streams.size): # one or no signal stream stream_ann = seg_annotations["signals"][stream_id] - # handle array annotations - for key in signal_annotations[0].keys(): - values = [] - # only collect values from channels belonging to current stream - for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: - value = signal_annotations[d][key] - values.append(value) - values = np.array(values) - if values.ndim == 1: - # 'InputRange': is 2D and make bugs - stream_ann["__array_annotations__"][key] = values + + if ncs_unit in stream_ann["name"]: + # handle array annotations + for key in ncs_key_set: + values = [] + # only collect values from channels belonging to current stream + for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: + value = signal_annotations[d][key] + values.append(value) + values = np.array(values) + if values.ndim == 1: + # 'InputRange': is 2D and make bugs + stream_ann["__array_annotations__"][key] = values + + elif nvt_unit in stream_ann["name"]: + for key in nvt_key_set: + values = [] + for d in np.where(signal_channels["stream_id"] == f"{stream_id}")[0]: + value = signal_annotations[d][key] + values.append(value) + values = np.array(values) + if values.ndim == 1: + stream_ann["__array_annotations__"][key] = values + + else: + continue for c in range(spike_channels.size): unit_ann = seg_annotations["spikes"][c] @@ -554,6 +685,9 @@ def _get_file_map(filename): elif suffix == "nev": return np.memmap(filename, dtype=nev_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) + + elif suffix == "nvt": + return np.memmap(filename, dtype=NeuralynxRawIO._nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE) else: raise ValueError(f"Unknown file suffix {suffix}") @@ -572,6 +706,7 @@ def _get_signal_size(self, block_index, seg_index, stream_index): if len(signals): sig = signals[0] + print("test", self._sigs_length[seg_index]) return self._sigs_length[seg_index][(sig["name"], sig["id"])] else: raise ValueError( @@ -861,6 +996,34 @@ def scan_stream_ncs_files(self, ncs_filenames): stream_section_structure = list(revSectMap.keys())[0] return memmaps, seg_time_limits, stream_section_structure + + def generate_nvt_seg_infos(self): + """ + TODO: write this later. 
+ """ + # HACK: nb_segments assumed to be 1 for now + seg_time_limits = SegmentTimeLimits( + nb_segment=1, t_start=[], t_stop=[], length=[], timestamp_limits=[] + ) + + ts0, ts1 = None, None + for nvt_memmap in self._nvt_memmaps: + for _, data in nvt_memmap.items(): + seg_time_limits.length.append(data.shape[0]) + ts = data["timestamp"] + + if ts.size == 0: + continue + if ts0 is None: + ts0 = ts[0] + ts1 = ts[-1] + + ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) + seg_time_limits.t_start.append(ts0 / 1e6) + seg_time_limits.t_stop.append(ts1 / 1e6) + seg_time_limits.timestamp_limits.append((ts0, ts1)) + + return seg_time_limits # time limits for set of segments diff --git a/neo/rawio/neuralynxrawio/nlxheader.py b/neo/rawio/neuralynxrawio/nlxheader.py index 45caadfbb..38758a94c 100644 --- a/neo/rawio/neuralynxrawio/nlxheader.py +++ b/neo/rawio/neuralynxrawio/nlxheader.py @@ -78,8 +78,8 @@ def _to_bool(txt): ("Sharpness", "", int), ("DirectionOffset", "", int), ("Resolution", "", None), - ("CameraDelay", "", int), - ("EnableFieldEstimation", "field_estimation_enabled", _to_bool), + ("CameraDelay", "", float), + ("EnableFieldEstimation", "", _to_bool), ("TargetDist", "", None), ] From 4b845a18170397b5e6dfe7c199883946d9cee670 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 16:51:22 -0400 Subject: [PATCH 14/24] feat(NeuralynxRawIO): add nvt support Only loading dnextracted_x, dnextracted_y and dnextracted_angle as 3 dimensionless signal channels. Only tested on directories with single nvt file. --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 213 ++++++++++++--------- 1 file changed, 121 insertions(+), 92 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 5028d145d..fb58a83b3 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -54,6 +54,7 @@ ) from operator import itemgetter import numpy as np +import pdb import os import pathlib import copy @@ -138,10 +139,9 @@ class NeuralynxRawIO(BaseRawIO): ("timestamp", "uint64"), ("bitfield_points", "uint32", (400,)), ("unused", "int16"), - ("test", "int32", (3,)), - # ("x_location", "int32"), - # ("y_location", "int32"), - # ("head_angle", "int32"), + ("x_location", "int32"), + ("y_location", "int32"), + ("head_angle", "int32"), ("colored_tgts", "int32", (50,)), ] @@ -171,11 +171,7 @@ def _source_name(self): def _parse_header(self): _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] - # _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] - _nvt_sample_dtype = "int32" - - ncs_unit = "#packet" - nvt_unit = "#frame" + _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] stream_channels = [] signal_channels = [] @@ -369,47 +365,47 @@ def _parse_header(self): # nvt files are passed as signals bundled into signal streams elif ext == "nvt": - file_mmap = self._get_file_map(filename) - system_id = np.unique(file_mmap["system_id"]).item() - chan_uid = (chan_name, str(system_id)) - self.nvt_filenames[chan_uid] = filename + units = "dimensionless" + gain = 1.0 + offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset - n_frames = copy.copy(file_mmap.shape[0]) - if n_frames: - t_start = copy.copy(file_mmap[0][3]) - if system_id not in self.tracker_system_ids: - self.tracker_system_ids.append(system_id) - else: - t_start = 0 + # TODO: to support multiple files, we 
need to adjust range since i must be unique + for i in range(len(nvt_selected_features)): + file_mmap = self._get_file_map(filename) - stream_prop = (info["sampling_rate"], n_frames, t_start) + chan_uid = (chan_name, str(i)) + self.nvt_filenames[chan_uid] = filename - if stream_prop not in stream_props: - stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} - else: - stream_props[stream_prop]["filenames"].append(filename) - stream_id = stream_props[stream_prop]["stream_id"] + n_frames = copy.copy(file_mmap.shape[0]) + if n_frames: + t_start = copy.copy(file_mmap[0][3]) + if i not in self.tracker_system_ids: + self.tracker_system_ids.append(i) + else: + t_start = 0 - units = "dimensionless" - gain = 1.0 - offset = info["CameraDelay"] # NOTE: assuming that the offset means time offset - signal_channels.append( - (chan_name, str(system_id), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id) - ) + stream_prop = (info["sampling_rate"], n_frames, t_start) - # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality - # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] - self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", "system_id", "test"]]}) - # print(self._nvt_memmap[chan_id][:5]) + if stream_prop not in stream_props: + stream_props[stream_prop] = {"stream_id": len(stream_props), "filenames": [filename]} + else: + stream_props[stream_prop]["filenames"].append(filename) + stream_id = stream_props[stream_prop]["stream_id"] - keys = [ - "recording_opened", - "VideoFormat", - "Resolution", - ] - d = {k: info[k] for k in keys if k in info} - signal_annotations.append(d) + signal_channels.append((chan_name, str(i), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id)) + + # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality + # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]] + self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]}) + + keys = [ + # "recording_opened", + # "VideoFormat", + # "Resolution", + ] + d = {k: info[k] for k in keys if k in info} + signal_annotations.append(d) signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype) spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype) @@ -451,13 +447,16 @@ def _parse_header(self): "ncs_segment_infos": ncsSegTimestampLimits, "section_structure": section_structure, } + # list with 1 element (a single dict), keys are chan_uids, values are memmaps + # print(ncs_stream_infos[stream_id]["segment_sig_memmaps"]) else: stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] + nvt_memmaps, time_infos = self.generate_nvt_seg_infos() nvt_stream_infos[stream_id] = { - "segment_sig_memmaps": self._nvt_memmaps, - "nvt_segment_infos": self.generate_nvt_seg_infos(), + "segment_sig_memmaps": nvt_memmaps, + "nvt_segment_infos": time_infos, "section_structure": None, } @@ -482,11 +481,8 @@ def _parse_header(self): f"{stream_id}:{chan_ids}." 
) - nvt_segments = 0 - if nvt_stream_infos: - nvt_segments += sum(nvt_stream_info["nvt_segment_infos"].nb_segment for nvt_stream_info in nvt_stream_infos.values()) if ref_sec_structure is not None: - self._nb_segment = len(ref_sec_structure.sects) + nvt_segments + self._nb_segment = len(ref_sec_structure.sects) else: # Use only a single segment if no ncs data is present self._nb_segment = 1 @@ -517,29 +513,30 @@ def min_max_tuple(tuple1, tuple2): t_stop = ncs_segment_info.t_stop[seg_idx] self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) - seg_offset = len(ref_sec_structure.sects) for stream_id, stream_info in nvt_stream_infos.items(): stream_mmaps = stream_info["segment_sig_memmaps"] for seg_idx, signal_dict in enumerate(stream_mmaps): - self._sigs_memmaps[seg_idx + seg_offset].update(signal_dict) + self._sigs_memmaps[seg_idx].update(signal_dict) nvt_segment_info = stream_info["nvt_segment_infos"] for seg_idx, (t_start, t_stop) in enumerate(nvt_segment_info.timestamp_limits): - self._timestamp_limits[seg_idx + seg_offset] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) + self._timestamp_limits[seg_idx] = min_max_tuple(self._timestamp_limits[seg_idx], (t_start, t_stop)) for seg_idx in range(nvt_segment_info.nb_segment): t_start = nvt_segment_info.t_start[seg_idx] t_stop = nvt_segment_info.t_stop[seg_idx] - self._signal_limits[seg_idx + seg_offset] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) + self._signal_limits[seg_idx] = min_max_tuple(self._signal_limits[seg_idx], (t_start, t_stop)) # precompute signal lengths within segments self._sigs_length = [] if self._sigs_memmaps: for seg_idx, sig_container in enumerate(self._sigs_memmaps): self._sigs_length.append({}) - print(seg_idx, type(sig_container)) for chan_uid, sig_infos in sig_container.items(): - self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) + if sig_infos[0].dtype == NeuralynxRawIO._ncs_dtype: + self._sigs_length[seg_idx][chan_uid] = int(sig_infos["nb_valid"].sum()) + else: + self._sigs_length[seg_idx][chan_uid] = sig_infos.shape[0] # Determine timestamp limits in nse, ntt, nev files by scanning them. 
ts0, ts1 = None, None @@ -638,8 +635,7 @@ def min_max_tuple(tuple1, tuple2): value = signal_annotations[d][key] values.append(value) values = np.array(values) - if values.ndim == 1: - stream_ann["__array_annotations__"][key] = values + stream_ann["__array_annotations__"][key] = values else: continue @@ -706,7 +702,6 @@ def _get_signal_size(self, block_index, seg_index, stream_index): if len(signals): sig = signals[0] - print("test", self._sigs_length[seg_index]) return self._sigs_length[seg_index][(sig["name"], sig["id"])] else: raise ValueError( @@ -720,7 +715,6 @@ def _get_signal_t_start(self, block_index, seg_index, stream_index): # use first channel of stream as all channels in stream have a common t_start channel = self.header["signal_channels"][stream_mask][0] - data = self._sigs_memmaps[seg_index][(channel["name"], channel["id"])] absolute_t_start = data["timestamp"][0] @@ -747,34 +741,55 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea ------- array of samples, with each requested channel in a column """ - if i_start is None: - i_start = 0 - if i_stop is None: - i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) + if ncs_unit in str(self.header["signal_streams"][stream_index]): + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) - block_start = i_start // NcsSection._RECORD_SIZE - block_stop = i_stop // NcsSection._RECORD_SIZE + 1 - sl0 = i_start % 512 - sl1 = sl0 + (i_stop - i_start) + block_start = i_start // NcsSection._RECORD_SIZE + block_stop = i_stop // NcsSection._RECORD_SIZE + 1 + sl0 = i_start % 512 + sl1 = sl0 + (i_stop - i_start) - if channel_indexes is None: - channel_indexes = slice(None) + if channel_indexes is None: + channel_indexes = slice(None) - stream_id = self.header["signal_streams"][stream_index]["id"] - stream_mask = self.header["signal_channels"]["stream_id"] == stream_id + stream_id = self.header["signal_streams"][stream_index]["id"] + stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"] - channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"] + channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"] + channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"] - # create buffer for samples - sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype="int16") + # create buffer for samples + sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype="int16") + + for i, chan_uid in enumerate(zip(channel_names, channel_ids)): + data = self._sigs_memmaps[seg_index][chan_uid] + sub = data[block_start:block_stop] + sigs_chunk[:, i] = sub["samples"].flatten()[sl0:sl1] + return sigs_chunk + + else: + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = self.get_signal_size(block_index=block_index, seg_index=seg_index, stream_index=stream_index) - for i, chan_uid in enumerate(zip(channel_names, channel_ids)): - data = self._sigs_memmaps[seg_index][chan_uid] - sub = data[block_start:block_stop] - sigs_chunk[:, i] = sub["samples"].flatten()[sl0:sl1] + stream_id = self.header["signal_streams"][stream_index]["id"] + stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - return sigs_chunk + # HACK: for some reason channel_ids and channel_names have an 
extra dimension, adding [0] fixes it temporarily + channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"][0] + channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"][0] + + sig_chunk = np.zeros((i_stop - i_start, len(nvt_selected_features)), dtype="int32") + + for i, chan_uid in enumerate(zip(channel_names, channel_ids)): + data = self._sigs_memmaps[seg_index][chan_uid] + sig_chunk[:, i] = data[nvt_selected_features[i]] + + return sig_chunk def _spike_count(self, block_index, seg_index, unit_index): chan_uid, unit_id = self.internal_unit_ids[unit_index] @@ -999,32 +1014,46 @@ def scan_stream_ncs_files(self, ncs_filenames): def generate_nvt_seg_infos(self): """ + NOTE: this will not work on multiple nvt files, this is just a temporary solution + maybe add a check for multiple nvt files and raise an error if there are more than one TODO: write this later. """ - # HACK: nb_segments assumed to be 1 for now + # HACK: nb_segments assumed to be 1, it really doesn't matter for now seg_time_limits = SegmentTimeLimits( nb_segment=1, t_start=[], t_stop=[], length=[], timestamp_limits=[] ) + memmaps ={} + prev_chan_name = None ts0, ts1 = None, None for nvt_memmap in self._nvt_memmaps: - for _, data in nvt_memmap.items(): - seg_time_limits.length.append(data.shape[0]) - ts = data["timestamp"] + for key, data in nvt_memmap.items(): + chan_name = key[0] + if chan_name != prev_chan_name: + prev_chan_name = chan_name + seg_time_limits.length.append(data.shape[0]) + ts = data["timestamp"] - if ts.size == 0: - continue - if ts0 is None: ts0 = ts[0] ts1 = ts[-1] - ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) - seg_time_limits.t_start.append(ts0 / 1e6) - seg_time_limits.t_stop.append(ts1 / 1e6) - seg_time_limits.timestamp_limits.append((ts0, ts1)) + ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) + seg_time_limits.t_start.append(ts0 / 1e6) + seg_time_limits.t_stop.append(ts1 / 1e6) + seg_time_limits.timestamp_limits.append((ts0, ts1)) + + if chan_name not in memmaps: + memmaps[chan_name] = {} + memmaps[chan_name][key] = data + + + memmaps = list(memmaps.values()) - return seg_time_limits + return memmaps, seg_time_limits +ncs_unit = "#packet" +nvt_unit = "#frame" +nvt_selected_features = ["x_location", "y_location", "head_angle"] # time limits for set of segments SegmentTimeLimits = namedtuple("SegmentTimeLimits", ["nb_segment", "t_start", "t_stop", "length", "timestamp_limits"]) From 872a82fa7e6da37062cdf02f977712422df2653b Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Sat, 25 May 2024 18:07:35 -0400 Subject: [PATCH 15/24] feat(NeuralynxRawIO): add Resolution to AnalogSignal.array_annotations as literal string of a list --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index fb58a83b3..4f8441974 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -54,7 +54,6 @@ ) from operator import itemgetter import numpy as np -import pdb import os import pathlib import copy @@ -173,6 +172,8 @@ def _parse_header(self): _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0] _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0] + nvt_counter = 0 + stream_channels = [] signal_channels = [] spike_channels = [] @@ -363,8 +364,11 @@ def _parse_header(self): 
self._nev_memmap[chan_id] = data

-            # nvt files are passed as signals bundled into signal streams
+            # the nvt file is passed as signals bundled into a signal stream separate from the ncs stream
             elif ext == "nvt":
+                nvt_counter += 1
+                if nvt_counter > 1:
+                    raise ValueError("Reading multiple nvt files in one session is not yet supported.")

                 units = "dimensionless"
                 gain = 1.0
                 offset = info["CameraDelay"]  # NOTE: assuming that the offset means time offset
@@ -399,10 +403,11 @@ def _parse_header(self):
                     # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]]
                     self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]})

+                info["Resolution"] = str(info["Resolution"])
                 keys = [
-                    # "recording_opened",
-                    # "VideoFormat",
-                    # "Resolution",
+                    "recording_opened",
+                    "VideoFormat",
+                    "Resolution",
                 ]
                 d = {k: info[k] for k in keys if k in info}
                 signal_annotations.append(d)
@@ -635,7 +640,8 @@ def min_max_tuple(tuple1, tuple2):
                         value = signal_annotations[d][key]
                         values.append(value)
                     values = np.array(values)
-                    stream_ann["__array_annotations__"][key] = values
+                    if values.ndim == 1:
+                        stream_ann["__array_annotations__"][key] = values

                 else:
                     continue

From a88000ecab0185e30a4f22b9721c8cbea7572ff3 Mon Sep 17 00:00:00 2001
From: LEGion-42
Date: Sat, 25 May 2024 18:09:01 -0400
Subject: [PATCH 16/24] chore: update gitignore

---
 .gitignore | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 0f023f9fb..15c9cccfb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -80,4 +80,5 @@ doc/*.nwb
 *.plx
 *.smr
 B95.zip
-grouped_ephys
\ No newline at end of file
+grouped_ephys
+test.py

From c253ec45270d601a018fd1b3f848f6537851a520 Mon Sep 17 00:00:00 2001
From: LEGion-42
Date: Sat, 25 May 2024 18:14:33 -0400
Subject: [PATCH 17/24] chore: update .gitignore

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 15c9cccfb..1d450b96b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -81,4 +81,4 @@ doc/*.nwb
 *.smr
 B95.zip
 grouped_ephys
-test.py
+nvt_test.py

From da31bc204426968e63dec2ed149e0d6eb1c14a8e Mon Sep 17 00:00:00 2001
From: LEGion-42
Date: Sat, 25 May 2024 21:23:23 -0400
Subject: [PATCH 18/24] chore(NeuralynxIO): add code documentation and option to ignore nvt files for NeuralynxIO and NeuralynxRawIO

---
 neo/io/neuralynxio.py                      | 10 ++-
 neo/rawio/neuralynxrawio/neuralynxrawio.py | 86 +++++++++++++---------
 2 files changed, 60 insertions(+), 36 deletions(-)

diff --git a/neo/io/neuralynxio.py b/neo/io/neuralynxio.py
index ed5cd0217..03139dad5 100644
--- a/neo/io/neuralynxio.py
+++ b/neo/io/neuralynxio.py
@@ -16,12 +16,13 @@ class NeuralynxIO(NeuralynxRawIO, BaseFromRaw):
     """
     Class for reading data from Neuralynx files.
-    This IO supports NCS, NEV, NSE and NTT file formats.
+    This IO supports NCS, NEV, NSE, NTT and NVT file formats.

     NCS contains signals for one channel
     NEV contains events
     NSE contains spikes and waveforms for mono electrodes
     NTT contains spikes and waveforms for tetrodes
+    NVT contains coordinates and head angles for video tracking
     """

     _prefered_signal_group_mode = "group-by-same-units"
@@ -35,6 +36,7 @@ def __init__(
         cache_path="same_as_resource",
         exclude_filename=None,
         keep_original_times=False,
+        ignore_nvt=False,
     ):
         """
         Initialise IO instance
@@ -59,6 +61,11 @@ def __init__(
             Preserve original time stamps as in data files. By default datasets are
             shifted to begin at t_start = 0*pq.second.
             Default: False
+        ignore_nvt : bool
+            Ignore NVT files when loading data. This is only a temporary argument before
+            support for multiple NVT files is added. Turn it on if there are multiple NVT
+            files in the directory.
+            Default: False
         """
         NeuralynxRawIO.__init__(
             self,
@@ -68,6 +75,7 @@ def __init__(
             cache_path=cache_path,
             exclude_filename=exclude_filename,
             keep_original_times=keep_original_times,
+            ignore_nvt=ignore_nvt,
         )
         if self.rawmode == "one-file":
             BaseFromRaw.__init__(self, filename)
diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py
index 4f8441974..1e0b89731 100644
--- a/neo/rawio/neuralynxrawio/neuralynxrawio.py
+++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py
@@ -1,12 +1,12 @@
 """
 Class for reading data from Neuralynx files.
-This IO supports NCS, NEV, NSE and NTT file formats.
-
+This IO supports NCS, NEV, NSE, NTT and NVT file formats.
 NCS contains the sampled signal for one channel
 NEV contains events
 NSE contains spikes and waveforms for mono electrodes
 NTT contains spikes and waveforms for tetrodes
+NVT contains coordinates and head angles for video tracking

 All Neuralynx files contain a 16 kilobyte text header followed by 0 or more fixed length records.
 The format of the header has never been formally specified, however, the Neuralynx programs which
@@ -38,6 +38,17 @@
 outside of Segments defined by .Ncs files will be ignored. To access all time point data in a
 single Segment load a session excluding .Ncs files.

+This RawIO only partially supports the NVT file format; limitations include:
+    * Only loads the dnextracted_x, dnextracted_y and dnextracted_angle data fields from NVT files.
+      Other fields that could be potentially useful (dwPoints and dntargets) are not yet supported
+      due to their format.
+    * Only a single NVT file can be loaded per session.
+    * The NVT file is assumed to be in the same segment (sharing a common clock (time basis)) as the
+      NCS files.
+
+The x and y pixel coordinates and animal head angle from the nvt file are treated as dimensionless
+analog signals bundled into a signal stream separate from the NCS stream.
+
 Continuous data streams are ordered by descending sampling rate.

 This RawIO presents only a single Block.
@@ -91,10 +102,15 @@ class NeuralynxRawIO(BaseRawIO):
           consecutive data packet is more than one sample interval.
         * strict_gap_mode = False then a gap has an increased tolerance. Some new system with different clock need this option
           otherwise, too many gaps are detected
+    ignore_nvt : bool
+        Ignore NVT files when loading data. This is only a temporary argument before
+        support for multiple NVT files is added. Turn it on if there are multiple NVT
+        files in the directory.
+        Default: False

     Notes
     -----
-    * This IO supports NCS, NEV, NSE and NTT file formats (not NVT or NRD yet)
+    * This IO supports NCS, NEV, NSE, NTT and NVT file formats (not NRD yet)

     * These variations of header format and possible differences between the stated sampling frequency
     and actual sampling frequency can create apparent time discrepancies in .Ncs files. Additionally,
@@ -120,7 +136,7 @@ class NeuralynxRawIO(BaseRawIO):
     Display all information about signal channels, units, segment size....
     """

-    extensions = ["nse", "ncs", "nev", "ntt", "nvt", "nrd"]  # nvt and nrd are not yet supported
+    extensions = ["nse", "ncs", "nev", "ntt", "nvt", "nrd"]  # nrd is not yet supported
     rawmode = "one-dir"

     _ncs_dtype = [
@@ -131,21 +147,9 @@ class NeuralynxRawIO(BaseRawIO):
         ("samples", "int16", (NcsSection._RECORD_SIZE)),
     ]

-    _nvt_dtype = [
-        ("swstx", "uint16"),
-        ("system_id", "uint16"),
-        ("data_size", "uint16"),
-        ("timestamp", "uint64"),
-        ("bitfield_points", "uint32", (400,)),
-        ("unused", "int16"),
-        ("x_location", "int32"),
-        ("y_location", "int32"),
-        ("head_angle", "int32"),
-        ("colored_tgts", "int32", (50,)),
-    ]

     def __init__(
-        self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, **kargs
+        self, dirname="", filename="", exclude_filename=None, keep_original_times=False, strict_gap_mode=True, ignore_nvt=False, **kargs
     ):

         if dirname != "":
@@ -160,6 +164,7 @@ def __init__(
         self.keep_original_times = keep_original_times
         self.strict_gap_mode = strict_gap_mode
         self.exclude_filename = exclude_filename
+        self.ignore_nvt = ignore_nvt
         BaseRawIO.__init__(self, **kargs)

     def _source_name(self):
@@ -170,7 +175,7 @@ def _source_name(self):

     def _parse_header(self):
         _ncs_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._ncs_dtype if dtype[0] == "samples"][0]
-        _nvt_sample_dtype = [dtype[1] for dtype in NeuralynxRawIO._nvt_dtype if dtype[0] == "x_location"][0]
+        _nvt_sample_dtype = [dtype[1] for dtype in _nvt_dtype if dtype[0] == "x_location"][0]

         nvt_counter = 0

@@ -182,7 +187,7 @@ def _parse_header(self):
         self.ncs_filenames = OrderedDict()  # (chan_name, chan_id): filename
         self.nse_ntt_filenames = OrderedDict()  # (chan_name, chan_id): filename
         self.nev_filenames = OrderedDict()  # chan_id: filename
-        self.nvt_filenames = OrderedDict()  # chan_id: filename
+        self.nvt_filenames = OrderedDict()

         self.file_headers = OrderedDict()  # filename: file header dict
@@ -197,7 +202,7 @@ def _parse_header(self):
         self._empty_nse_ntt = []
         self._empty_nvt = []

-        # Explore the directory looking for ncs, nev, nse and ntt
+        # Explore the directory looking for ncs, nev, nse, ntt and nvt files
         # and construct channels headers.
         signal_annotations = []
         unit_annotations = []
@@ -237,7 +242,7 @@ def _parse_header(self):
             if ext not in self.extensions:
                 continue

-            # Skip Ncs files with only header. Other empty file types
+            # Skip Ncs and nvt files with only header. Other empty file types
             # will have an empty dataset constructed later.
             if (os.path.getsize(filename) <= NlxHeader.HEADER_SIZE) and ext in ["ncs", "nvt"]:
                 if ext == "ncs":
@@ -365,16 +370,20 @@ def _parse_header(self):

                 self._nev_memmap[chan_id] = data

             # the nvt file is passed as signals bundled into a signal stream separate from the ncs stream
-            elif ext == "nvt":
+            elif ext == "nvt" and not self.ignore_nvt:
                 nvt_counter += 1
                 if nvt_counter > 1:
-                    raise ValueError("Reading multiple nvt files in one session is not yet supported.")
+                    raise ValueError("""
+                        Reading multiple nvt files in one session is not yet supported.
+                        Try loading each nvt file separately or set ignore_nvt=True.
+                        """)

                 units = "dimensionless"
                 gain = 1.0
                 offset = info["CameraDelay"]  # NOTE: assuming that the offset means time offset

-                # TODO: to support multiple files, we need to adjust range since i must be unique
+                # treating each feature as a separate channel to emulate ncs channels
+                # TODO: to support multiple files, we need to adjust the range since each i must be unique
                 for i in range(len(nvt_selected_features)):
                     file_mmap = self._get_file_map(filename)
@@ -400,7 +409,6 @@ def _parse_header(self):
                     signal_channels.append((chan_name, str(i), info["sampling_rate"], _nvt_sample_dtype, units, gain, offset, stream_id))

                     # NOTE: only loading the selected features here. "bitfield_points" and "colored_tgts" are not loaded due to their dimensionality
-                    # self._nvt_memmap[chan_id] = file_mmap[["timestamp", "system_id", "x_location", "y_location", "head_angle"]]
                     self._nvt_memmaps.append({chan_uid : file_mmap[["timestamp", nvt_selected_features[i]]]})

                 info["Resolution"] = str(info["Resolution"])
@@ -438,7 +446,7 @@ def _parse_header(self):
         nvt_stream_infos = {}

         # Read ncs files of each stream for gap detection and nb_segment computation.
-        # Since nvt files are passed as signals, we need to filter them out.
+        # HACK: filter out the nvt data for now since nvt files are passed as signals, and same segment is assumed.
         for stream_id in np.unique(signal_channels["stream_id"]):
             stream_channels = signal_channels[signal_channels["stream_id"] == stream_id]
             stream_chan_uids = zip(stream_channels["name"], stream_channels["id"])
@@ -452,8 +460,6 @@ def _parse_header(self):
                     "ncs_segment_infos": ncsSegTimestampLimits,
                     "section_structure": section_structure,
                 }
-                # list with 1 element (a single dict), keys are chan_uids, values are memmaps
-                # print(ncs_stream_infos[stream_id]["segment_sig_memmaps"])

             else:
                 stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids]
@@ -689,7 +695,7 @@ def _get_file_map(filename):
             return np.memmap(filename, dtype=nev_dtype, mode="r", offset=NlxHeader.HEADER_SIZE)

         elif suffix == "nvt":
-            return np.memmap(filename, dtype=NeuralynxRawIO._nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE)
+            return np.memmap(filename, dtype=_nvt_dtype, mode="r", offset=NlxHeader.HEADER_SIZE)

         else:
             raise ValueError(f"Unknown file suffix {suffix}")
@@ -785,7 +791,7 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea
             stream_id = self.header["signal_streams"][stream_index]["id"]
             stream_mask = self.header["signal_channels"]["stream_id"] == stream_id

-            # HACK: for some reason channel_ids and channel_names have an extra dimension, adding [0] fixes it temporarily
+            # HACK: for some reason channel_ids and channel_names have an extra dimension, adding [0] fixes it
             channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"][0]
             channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"][0]
@@ -1020,9 +1026,9 @@ def scan_stream_ncs_files(self, ncs_filenames):

     def generate_nvt_seg_infos(self):
         """
-        NOTE: this will not work on multiple nvt files, this is just a temporary solution
-        maybe add a check for multiple nvt files and raise an error if there are more than one
-        TODO: write this later.
+        Since NVT files are processed in a similar way to NCS files, this RawIO passes them in similar
+        data structures internally. This function simply emulates the scan_stream_ncs_files function
+        for NVT files so that the data can be processed in the same way.
""" # HACK: nb_segments assumed to be 1, it really doesn't matter for now seg_time_limits = SegmentTimeLimits( @@ -1052,9 +1058,7 @@ def generate_nvt_seg_infos(self): memmaps[chan_name] = {} memmaps[chan_name][key] = data - memmaps = list(memmaps.values()) - return memmaps, seg_time_limits ncs_unit = "#packet" @@ -1078,6 +1082,18 @@ def generate_nvt_seg_infos(self): ("event_string", "S128"), ] +_nvt_dtype = [ + ("swstx", "uint16"), + ("system_id", "uint16"), + ("data_size", "uint16"), + ("timestamp", "uint64"), + ("bitfield_points", "uint32", (400,)), + ("unused", "int16"), + ("x_location", "int32"), + ("y_location", "int32"), + ("head_angle", "int32"), + ("colored_tgts", "int32", (50,)), +] def get_nse_or_ntt_dtype(info, ext): """ From 0affa952ce73469660ec9521de2cbb8d18fb8241 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Mon, 27 May 2024 18:45:24 -0400 Subject: [PATCH 19/24] fix(NeuralynxRawIO): fix error when reading multi-segmental data --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 36 ++++++++++++++++------ 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index 1e0b89731..dc9359c3f 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -68,6 +68,7 @@ import os import pathlib import copy +import warnings from collections import namedtuple, OrderedDict from neo.rawio.neuralynxrawio.ncssections import NcsSection, NcsSectionsFactory @@ -446,8 +447,9 @@ def _parse_header(self): nvt_stream_infos = {} # Read ncs files of each stream for gap detection and nb_segment computation. - # HACK: filter out the nvt data for now since nvt files are passed as signals, and same segment is assumed. - for stream_id in np.unique(signal_channels["stream_id"]): + # signal channels are sorted by dtype so that ncs files are read first + sorted_signal_channels = np.sort(signal_channels, order=["dtype"]) + for stream_id in np.unique(sorted_signal_channels["stream_id"]): stream_channels = signal_channels[signal_channels["stream_id"] == stream_id] stream_chan_uids = zip(stream_channels["name"], stream_channels["id"]) # ncs files have dtype int16 while nvt files have dtype int32, so we use this to filter out nvt files @@ -462,8 +464,12 @@ def _parse_header(self): } else: + # TODO: this way of dealing with segments is not ideal, but it is a temporary solution + ref_stream_id = list(ncs_stream_infos.keys())[0] + nb_segment = len(ncs_stream_infos[ref_stream_id]["section_structure"].sects) + stream_filenames = [self.nvt_filenames[chuid] for chuid in stream_chan_uids] - nvt_memmaps, time_infos = self.generate_nvt_seg_infos() + nvt_memmaps, time_infos = self.generate_nvt_seg_infos(nb_segment) nvt_stream_infos[stream_id] = { "segment_sig_memmaps": nvt_memmaps, @@ -668,6 +674,13 @@ def min_max_tuple(tuple1, tuple2): # ~ ev_ann['nttl'] = # ~ ev_ann['digital_marker'] = # ~ ev_ann['analog_marker'] = + + if self._nb_segment > 1 and self._nvt_memmaps != []: + warnings.warn( + "\nMultiple segments detected, data from nvt file is duplicated to each segment. 
" + "Loading nvt files along with multi-segmental ncs data are currently not well supported, " + "try setting ignore_nvt=True or load nvt files separately.", + UserWarning) @staticmethod def _get_file_map(filename): @@ -1024,15 +1037,18 @@ def scan_stream_ncs_files(self, ncs_filenames): return memmaps, seg_time_limits, stream_section_structure - def generate_nvt_seg_infos(self): + def generate_nvt_seg_infos(self, nb_segment): """ Since NVT files are processed in a similar way to NCS files, this RawIO pass them in similar data structures internally. this function simply emulates the scan_stream_ncs_files function for NVT files so that the data can be processed in the same way. + TODO: data from the nvt file is put in segment[0] by default without any segmenting. This is + causing KeyError when ncs data contains multiple segments. So we are populating all other + segments other than the first with copies. This is only a temporary solution. """ # HACK: nb_segments assumed to be 1, it really doesn't matter for now seg_time_limits = SegmentTimeLimits( - nb_segment=1, t_start=[], t_stop=[], length=[], timestamp_limits=[] + nb_segment=nb_segment, t_start=[], t_stop=[], length=[], timestamp_limits=[] ) memmaps ={} @@ -1043,22 +1059,24 @@ def generate_nvt_seg_infos(self): chan_name = key[0] if chan_name != prev_chan_name: prev_chan_name = chan_name - seg_time_limits.length.append(data.shape[0]) ts = data["timestamp"] ts0 = ts[0] ts1 = ts[-1] ts0, ts1 = min(ts0, ts[0]), max(ts1, ts[-1]) - seg_time_limits.t_start.append(ts0 / 1e6) - seg_time_limits.t_stop.append(ts1 / 1e6) - seg_time_limits.timestamp_limits.append((ts0, ts1)) + for i in range(nb_segment): + seg_time_limits.length.append(data.shape[0]) + seg_time_limits.t_start.append(ts0 / 1e6) + seg_time_limits.t_stop.append(ts1 / 1e6) + seg_time_limits.timestamp_limits.append((ts0, ts1)) if chan_name not in memmaps: memmaps[chan_name] = {} memmaps[chan_name][key] = data memmaps = list(memmaps.values()) + memmaps = [data for data in memmaps for _ in range(nb_segment)] return memmaps, seg_time_limits ncs_unit = "#packet" From 690009a10cafa4e0285707a2d44325d794831ae9 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Mon, 27 May 2024 18:52:30 -0400 Subject: [PATCH 20/24] fix(testing): update header size verification with nvt data included --- neo/test/rawiotest/test_neuralynxrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/rawiotest/test_neuralynxrawio.py b/neo/test/rawiotest/test_neuralynxrawio.py index c294cd4d5..c297cf086 100644 --- a/neo/test/rawiotest/test_neuralynxrawio.py +++ b/neo/test/rawiotest/test_neuralynxrawio.py @@ -164,7 +164,7 @@ def test_exclude_filenames(self): self.assertEqual(len(rawio.ncs_filenames), 1) self.assertEqual(len(rawio.nev_filenames), 0) sigHdrs = rawio.header["signal_channels"] - self.assertEqual(sigHdrs.size, 1) + self.assertEqual(sigHdrs.size, 4) self.assertEqual(sigHdrs[0][0], "CSC1") self.assertEqual(sigHdrs[0][1], "58") self.assertEqual(len(rawio.header["spike_channels"]), 8) From 0ae2bdda279dde30589a57648d5f71808654e489 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Mon, 27 May 2024 18:55:28 -0400 Subject: [PATCH 21/24] fix(testing): update another header size verification i missed --- neo/test/rawiotest/test_neuralynxrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/rawiotest/test_neuralynxrawio.py b/neo/test/rawiotest/test_neuralynxrawio.py index c297cf086..be7605d43 100644 --- a/neo/test/rawiotest/test_neuralynxrawio.py +++ 
b/neo/test/rawiotest/test_neuralynxrawio.py @@ -150,7 +150,7 @@ def test_exclude_filenames(self): self.assertEqual(len(rawio.ncs_filenames), 1) self.assertEqual(len(rawio.nev_filenames), 1) sigHdrs = rawio.header["signal_channels"] - self.assertEqual(sigHdrs.size, 1) + self.assertEqual(sigHdrs.size, 4) self.assertEqual(sigHdrs[0][0], "CSC1") self.assertEqual(sigHdrs[0][1], "58") self.assertEqual(len(rawio.header["spike_channels"]), 8) From e13011ac2e5d51d2acc189087cea45904db514a1 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Wed, 29 May 2024 04:02:21 -0400 Subject: [PATCH 22/24] fix(NeuralynxRawIO): fix KeyError and channel selection issue with _get_analogsignal_chunk --- neo/rawio/neuralynxrawio/neuralynxrawio.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neo/rawio/neuralynxrawio/neuralynxrawio.py b/neo/rawio/neuralynxrawio/neuralynxrawio.py index dc9359c3f..43166c188 100644 --- a/neo/rawio/neuralynxrawio/neuralynxrawio.py +++ b/neo/rawio/neuralynxrawio/neuralynxrawio.py @@ -804,15 +804,15 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea stream_id = self.header["signal_streams"][stream_index]["id"] stream_mask = self.header["signal_channels"]["stream_id"] == stream_id - # HACK: for some reason channel_ids and channel_names have an extra dimension, adding [0] fixes it - channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"][0] - channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"][0] + # HACK: for some reason channel_ids and channel_names have an extra dimension, adding .flatten() fixes it + channel_ids = self.header["signal_channels"][stream_mask][channel_indexes]["id"].flatten() + channel_names = self.header["signal_channels"][stream_mask][channel_indexes]["name"].flatten() - sig_chunk = np.zeros((i_stop - i_start, len(nvt_selected_features)), dtype="int32") + sig_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype="int32") for i, chan_uid in enumerate(zip(channel_names, channel_ids)): data = self._sigs_memmaps[seg_index][chan_uid] - sig_chunk[:, i] = data[nvt_selected_features[i]] + sig_chunk[:, i] = data[nvt_selected_features[int(chan_uid[1])]][i_start:i_stop] return sig_chunk From a2970fb8be2a6f284730f103cb5ea06efcd4dc29 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Wed, 29 May 2024 04:59:15 -0400 Subject: [PATCH 23/24] fix: exclude nvt data from ncs tests --- neo/test/iotest/test_neuralynxio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neo/test/iotest/test_neuralynxio.py b/neo/test/iotest/test_neuralynxio.py index 7c1ecc658..dd20fe200 100644 --- a/neo/test/iotest/test_neuralynxio.py +++ b/neo/test/iotest/test_neuralynxio.py @@ -108,7 +108,7 @@ def test_read_block(self): # There are two segments due to gap in recording self.assertEqual(len(block.segments), 2) for seg in block.segments: - self.assertEqual(len(seg.analogsignals), 1) + self.assertEqual(len(seg.analogsignals), 2) self.assertEqual(seg.analogsignals[0].shape[-1], 2) self.assertEqual(seg.analogsignals[0].sampling_rate, 2.0 * pq.kHz) self.assertEqual(len(seg.spiketrains), 8) @@ -271,7 +271,7 @@ def test_ncs(self): block = nio.read_block() # check that data agrees in first segment first channel only - for anasig_id, anasig in enumerate(block.segments[0].analogsignals): + for anasig_id, anasig in enumerate(block.segments[0].analogsignals[0]): chid = anasig.array_annotations["channel_ids"][0] chname = str(anasig.array_annotations["channel_names"][0]) From 
2c3b67d560b42746949f03b05202e10c3a865089 Mon Sep 17 00:00:00 2001 From: LEGion-42 Date: Wed, 29 May 2024 05:34:15 -0400 Subject: [PATCH 24/24] fix: exclude nvt data from ncs tests --- neo/test/iotest/test_neuralynxio.py | 47 +++++++++++++++-------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/neo/test/iotest/test_neuralynxio.py b/neo/test/iotest/test_neuralynxio.py index dd20fe200..d400d5aff 100644 --- a/neo/test/iotest/test_neuralynxio.py +++ b/neo/test/iotest/test_neuralynxio.py @@ -115,7 +115,7 @@ def test_read_block(self): # Testing different parameter combinations block = nio.read_block(load_waveforms=True) - self.assertEqual(len(block.segments[0].analogsignals), 1) + self.assertEqual(len(block.segments[0].analogsignals), 2) self.assertEqual(len(block.segments[0].spiketrains), 8) self.assertEqual(block.segments[0].spiketrains[0].waveforms.shape[0], block.segments[0].spiketrains[0].shape[0]) # this is tetrode data, containing 32 samples per waveform @@ -271,28 +271,29 @@ def test_ncs(self): block = nio.read_block() # check that data agrees in first segment first channel only - for anasig_id, anasig in enumerate(block.segments[0].analogsignals[0]): - chid = anasig.array_annotations["channel_ids"][0] - - chname = str(anasig.array_annotations["channel_names"][0]) - chuid = (chname, chid) - filename = nio.ncs_filenames[chuid][:-3] + "txt" - filename = filename.replace("original_data", "plain_data") - overlap = 512 * 500 - if os.path.isfile(filename): - plain_data = self._load_plaindata(filename, overlap) - gain_factor_0 = plain_data[0] / anasig.magnitude[0, 0] - numToTest = min(len(plain_data), len(anasig.magnitude[:, 0])) - np.testing.assert_allclose( - plain_data[:numToTest], - anasig.magnitude[:numToTest, 0] * gain_factor_0, - rtol=0.01, - err_msg=" for file " + filename, - ) - else: - warnings.warn(f"Could not find corresponding test file {filename}") - # TODO: Create missing plain data file using NeuraView - # https://neuralynx.com/software/category/data-analysis + for anasig_id, anasig in enumerate(block.segments[0].analogsignals): + if "VideoFormat" not in anasig.array_annotations: + chid = anasig.array_annotations["channel_ids"][0] + + chname = str(anasig.array_annotations["channel_names"][0]) + chuid = (chname, chid) + filename = nio.ncs_filenames[chuid][:-3] + "txt" + filename = filename.replace("original_data", "plain_data") + overlap = 512 * 500 + if os.path.isfile(filename): + plain_data = self._load_plaindata(filename, overlap) + gain_factor_0 = plain_data[0] / anasig.magnitude[0, 0] + numToTest = min(len(plain_data), len(anasig.magnitude[:, 0])) + np.testing.assert_allclose( + plain_data[:numToTest], + anasig.magnitude[:numToTest, 0] * gain_factor_0, + rtol=0.01, + err_msg=" for file " + filename, + ) + else: + warnings.warn(f"Could not find corresponding test file {filename}") + # TODO: Create missing plain data file using NeuraView + # https://neuralynx.com/software/category/data-analysis def test_keep_original_spike_times(self): for session in self.files_to_test:
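---

A note on the record layout (not part of the patch series itself): the nvt reading added
above boils down to memory-mapping fixed-size records that follow the standard 16 kilobyte
Neuralynx text header. The sketch below shows that idea outside of Neo, assuming only numpy.
The dtype mirrors `_nvt_dtype` from the patches and is not an official Neuralynx
specification; `read_nvt` and `NLX_HEADER_SIZE` are illustrative names, not part of the Neo
API.

    import numpy as np

    NLX_HEADER_SIZE = 16 * 1024  # all Neuralynx files start with a 16 kilobyte text header

    # Record layout, mirroring _nvt_dtype from the patches above.
    nvt_dtype = np.dtype(
        [
            ("swstx", "uint16"),
            ("system_id", "uint16"),
            ("data_size", "uint16"),
            ("timestamp", "uint64"),
            ("bitfield_points", "uint32", (400,)),
            ("unused", "int16"),
            ("x_location", "int32"),
            ("y_location", "int32"),
            ("head_angle", "int32"),
            ("colored_tgts", "int32", (50,)),
        ]
    )

    def read_nvt(filename):
        """Return times in seconds plus an (n_frames, 3) array of x, y and head
        angle, roughly what the RawIO exposes as the dimensionless nvt stream."""
        records = np.memmap(filename, dtype=nvt_dtype, mode="r", offset=NLX_HEADER_SIZE)
        # Neuralynx timestamps are integer microseconds, hence the division by 1e6
        # used throughout the patches above.
        times = records["timestamp"] / 1e6
        features = ["x_location", "y_location", "head_angle"]  # nvt_selected_features
        signals = np.stack([records[f] for f in features], axis=1)
        return times, signals

Within Neo itself the same data is reached through NeuralynxIO(dirname=...).read_block(),
where it appears as a separate dimensionless signal stream, as patches 14 and 18 describe.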