From 939d128de0d097c4cfb886a9382d1bce5d9e7ed5 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Fri, 2 Aug 2024 15:26:40 +0200
Subject: [PATCH 01/63] Basic Reader Nicolet e Files

---
 neo/io/nicoletio.py       |   39 ++
 neo/rawio/nicoletrawio.py | 1119 +++++++++++++++++++++++++++++++++++++
 2 files changed, 1158 insertions(+)
 create mode 100644 neo/io/nicoletio.py
 create mode 100644 neo/rawio/nicoletrawio.py

diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py
new file mode 100644
index 000000000..c4d29c558
--- /dev/null
+++ b/neo/io/nicoletio.py
@@ -0,0 +1,39 @@
+"""
+neo.io has been split into a 2-level API:
+  * neo.io: this API gives Neo objects
+  * neo.rawio: this API gives raw data as they are in files.
+
+Developers are encouraged to use neo.rawio.
+
+Once that is done, the neo.io class can be implemented trivially
+using code like that shown in this file.
+
+Author: sgarcia
+
+"""
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.nicoletrawio import NicoletRawIO
+
+
+class NicoletIO(NicoletRawIO, BaseFromRaw):
+    name = "NicoletIO"
+    description = "Class for reading Nicolet files (.e)"
+
+    # This is an important choice when there are several channels:
+    # 'split-all' : one AnalogSignal per channel
+    # 'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
+    _prefered_signal_group_mode = "group-by-same-units"
+
+    def __init__(self, filepath=""):
+        NicoletRawIO.__init__(self, filepath=filepath)
+        BaseFromRaw.__init__(self, filepath)
+
+
+if __name__ == '__main__':
+
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
+    file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e')
+    segment = file.read_segment()

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
new file mode 100644
index 000000000..b465cd9dc
--- /dev/null
+++ b/neo/rawio/nicoletrawio.py
@@ -0,0 +1,1119 @@
+"""
+This module implements a reader for .e files produced by the NicoletOne EEG-System.
+
+This reader is based on a MATLAB implementation of a .e file reader (https://github.com/ieeg-portal/Nicolet-Reader).
+The original authors of the MATLAB implementation are Joost Wagenaar, Cristian Donos, Jan Brogger and Callum Stewart.
+
+Author: Murezi Capaul
+
+CHECK IF THIS CHANGE IS ALSO ON THE MAIN BRANCH AND DEV BRANCH
+"""
+
+from __future__ import annotations
+
+import numpy as np
+import warnings
+from datetime import datetime, timedelta
+from pathlib import Path
+
+from neo.rawio.baserawio import (
+    BaseRawIO,
+    _signal_channel_dtype,
+    _signal_stream_dtype,
+    _spike_channel_dtype,
+    _event_channel_dtype,
+)
+
+
+class NicoletRawIO(BaseRawIO):
+    '''
+    Class for reading .e files produced by the NicoletOne EEG-System.
+
+    Parameters
+    ----------
+    filepath: str | Path
+        The path to the .e file. Will be converted to a pathlib.Path object.
+
+    Notes
+    ----------
+    Currently, only channels that have the same sampling rate as the EEG channels will be processed. Other channels will be discarded.
+    '''
+
+    extensions = ["e"]
+    rawmode = "one-file"
+
+    LABELSIZE = 32
+    TSLABELSIZE = 64
+    UNITSIZE = 16
+    ITEMNAMESIZE = 64
+    UNIX_TIME_CONVERSION = 2209161600 #TODO: Currently, time is always UTC. 
Find where to read timezones + SEC_PER_DAY = 86400 + + TAGS_DICT = { + 'ExtraDataTags' : 'ExtraDataTags', + 'SegmentStream' : 'SegmentStream', + 'DataStream' : 'DataStream', + 'InfoChangeStream' : 'InfoChangeStream', + 'InfoGuids' : 'InfoGuids', + '{A271CCCB-515D-4590-B6A1-DC170C8D6EE2}' : 'TSGUID', + '{8A19AA48-BEA0-40D5-B89F-667FC578D635}' : 'DERIVATIONGUID', + '{F824D60C-995E-4D94-9578-893C755ECB99}' : 'FILTERGUID', + '{02950361-35BB-4A22-9F0B-C78AAA5DB094}' : 'DISPLAYGUID', + '{8E94EF21-70F5-11D3-8F72-00105A9AFD56}' : 'FILEINFOGUID', + '{E4138BC0-7733-11D3-8685-0050044DAAB1}' : 'SRINFOGUID', + '{C728E565-E5A0-4419-93D2-F6CFC69F3B8F}' : 'EVENTTYPEINFOGUID', + '{D01B34A0-9DBD-11D3-93D3-00500400C148}' : 'AUDIOINFOGUID', + '{BF7C95EF-6C3B-4E70-9E11-779BFFF58EA7}' : 'CHANNELGUID', + '{2DEB82A1-D15F-4770-A4A4-CF03815F52DE}' : 'INPUTGUID', + '{5B036022-2EDC-465F-86EC-C0A4AB1A7A91}' : 'INPUTSETTINGSGUID', + '{99A636F2-51F7-4B9D-9569-C7D45058431A}' : 'PHOTICGUID', + '{55C5E044-5541-4594-9E35-5B3004EF7647}' : 'ERRORGUID', + '{223A3CA0-B5AC-43FB-B0A8-74CF8752BDBE}' : 'VIDEOGUID', + '{0623B545-38BE-4939-B9D0-55F5E241278D}' : 'DETECTIONPARAMSGUID', + '{CE06297D-D9D6-4E4B-8EAC-305EA1243EAB}' : 'PAGEGUID', + '{782B34E8-8E51-4BB9-9701-3227BB882A23}' : 'ACCINFOGUID', + '{3A6E8546-D144-4B55-A2C7-40DF579ED11E}' : 'RECCTRLGUID', + '{D046F2B0-5130-41B1-ABD7-38C12B32FAC3}' : 'GUID TRENDINFOGUID', + '{CBEBA8E6-1CDA-4509-B6C2-6AC2EA7DB8F8}' : 'HWINFOGUID', + '{E11C4CBA-0753-4655-A1E9-2B2309D1545B}' : 'VIDEOSYNCGUID', + '{B9344241-7AC1-42B5-BE9B-B7AFA16CBFA5}' : 'SLEEPSCOREINFOGUID', + '{15B41C32-0294-440E-ADFF-DD8B61C8B5AE}' : 'FOURIERSETTINGSGUID', + '{024FA81F-6A83-43C8-8C82-241A5501F0A1}' : 'SPECTRUMGUID', + '{8032E68A-EA3E-42E8-893E-6E93C59ED515}' : 'SIGNALINFOGUID', + '{30950D98-C39C-4352-AF3E-CB17D5B93DED}' : 'SENSORINFOGUID', + '{F5D39CD3-A340-4172-A1A3-78B2CDBCCB9F}' : 'DERIVEDSIGNALINFOGUID', + '{969FBB89-EE8E-4501-AD40-FB5A448BC4F9}' : 'ARTIFACTINFOGUID', + '{02948284-17EC-4538-A7FA-8E18BD65E167}' : 'STUDYINFOGUID', + '{D0B3FD0B-49D9-4BF0-8929-296DE5A55910}' : 'PATIENTINFOGUID', + '{7842FEF5-A686-459D-8196-769FC0AD99B3}' : 'DOCUMENTINFOGUID', + '{BCDAEE87-2496-4DF4-B07C-8B4E31E3C495}' : 'USERSINFOGUID', + '{B799F680-72A4-11D3-93D3-00500400C148}' : 'EVENTGUID', + '{AF2B3281-7FCE-11D2-B2DE-00104B6FC652}' : 'SHORTSAMPLESGUID', + '{89A091B3-972E-4DA2-9266-261B186302A9}' : 'DELAYLINESAMPLESGUID', + '{291E2381-B3B4-44D1-BB77-8CF5C24420D7}' : 'GENERALSAMPLESGUID', + '{5F11C628-FCCC-4FDD-B429-5EC94CB3AFEB}' : 'FILTERSAMPLESGUID', + '{728087F8-73E1-44D1-8882-C770976478A2}' : 'DATEXDATAGUID', + '{35F356D9-0F1C-4DFE-8286-D3DB3346FD75}' : 'TESTINFOGUID'} + + INFO_PROPS = [ + 'patientID', 'firstName','middleName','lastName', + 'altID','mothersMaidenName','DOB','DOD','street','sexID','phone', + 'notes','dominance','siteID','suffix','prefix','degree','apartment', + 'city','state','country','language','height','weight','race','religion', + 'maritalStatus'] + + #TODO: Find more translations for events guids -> Open file in Nicolet Viewer and here, and compare names of events + HC_EVENT = { + '{A5A95612-A7F8-11CF-831A-0800091B5BDA}' : 'Annotation', + '{A5A95646-A7F8-11CF-831A-0800091B5BDA}' : 'Seizure', + '{08784382-C765-11D3-90CE-00104B6F4F70}' : 'Format change', + '{6FF394DA-D1B8-46DA-B78F-866C67CF02AF}' : 'Photic', + '{481DFC97-013C-4BC5-A203-871B0375A519}' : 'Posthyperventilation', + '{725798BF-CD1C-4909-B793-6C7864C27AB7}' : 'Review progress', + '{96315D79-5C24-4A65-B334-E31A95088D55}' : 'Exam start', + 
'{A5A95608-A7F8-11CF-831A-0800091B5BDA}' : 'Hyperventilation', + '{A5A95617-A7F8-11CF-831A-0800091B5BDA}' : 'Impedance', + '{A5A95645-A7F8-11CF-831A-0800091B5BDA}' : 'Event Comment', + '{C3B68051-EDCF-418C-8D53-27077B92DE22}' : 'Spike', + '{99FFE0AA-B8F9-49E5-8390-8F072F4E00FC}' : 'EEG Check', + '{A5A9560A-A7F8-11CF-831A-0800091B5BDA}' : 'Print', + '{A5A95616-A7F8-11CF-831A-0800091B5BDA}' : 'Patient Event', + '{0DE05C94-7D03-47B9-864F-D586627EA891}' : 'Eyes closed', + '{583AA2C6-1F4E-47CF-A8D4-80C69EB8A5F3}' : 'Eyes open', + '{BAE4550A-8409-4289-9D8A-0D571A206BEC}' : 'Eating', + '{1F3A45A4-4D0F-4CC4-A43A-CAD2BC2D71F2}' : 'ECG', + '{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}' : 'Toilet', + '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode'} + + + def __init__(self, filepath = ""): + BaseRawIO.__init__(self) + self.filepath = Path(filepath) + + def _source_name(self): + return self.filepath + + + def _parse_header(self): + self._extract_header_information() + self.header = {} + self.header["nb_block"] = 1 #TODO: Find out if multiple blocks exist + self.header["nb_segment"] = [len(self.segments_properties)] + self.header["signal_streams"] = np.array([("Signals", "0")], #TODO: Consider implementing all recorded channels after finding out if they make sense + dtype=_signal_stream_dtype) + self.header["signal_channels"] = self._create_signal_channels(dtype = _signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) + self.header["spike_channels"] = np.array([], #TODO: Find if there is automatic spike detection + dtype= _spike_channel_dtype) + self.header["event_channels"] = np.array([("Events", "0", "event"), #TODO: Find if there are more types of events that can be identified + ("Epochs", "1", "epoch")], + dtype = _event_channel_dtype) + self._generate_minimal_annotations() + self._generate_additional_annotations() + + def _get_tags(self): + misc_structure = [ + ('misc1', 'uint32', 5), + #('unknown', 'uint32'), + #('index_idx', 'uint32') + ] + tags_structure = [ + ('tag', 'S80'), + ('index', 'uint32')] + + with open(self.filepath, "rb") as fid: + index_idx = read_as_dict(fid, + misc_structure) + unknown = read_as_dict(fid, + [('unknown', 'uint32', 1)]) + fid.seek(172) + n_tags = read_as_list(fid, + [('n_tags', 'uint32')]) + tags = [read_as_dict(fid, + tags_structure) for _ in range(n_tags)] + + for entry in tags: + try: + entry['id_str'] = self.TAGS_DICT[entry['tag']] + except KeyError: + entry['id_str'] = 'UNKNOWN' + + self.n_tags = n_tags + self.index_idx = index_idx + self.tags = tags + + def _get_qi(self): + qi_structure = [ + ('n_entries', 'uint32'), + ('misc1', 'uint32'), + ('index_idx', 'uint32'), + ('misc3', 'uint32'), + ('l_qi', 'uint64'), + ('first_idx', 'uint64', self.n_tags), + ] + with open(self.filepath, "rb") as fid: + fid.seek(172208) + qi = read_as_dict(fid, + qi_structure) + self.qi = qi + + def _get_main_index(self): + #TODO: Find file with multiple index pointers to test the while loop + #TODO: Find out what multiple block lengths mean + main_index = [] + current_index = 0 + next_index_pointer = self.qi['index_idx'] + with open(self.filepath, "rb") as fid: + while current_index < self.qi['n_entries']: + fid.seek(next_index_pointer) + nr_index = read_as_list(fid, + [('nr_index', 'uint64')] + ) + var = read_as_list(fid, + [('var', 'uint64', int(3*nr_index))]) + + for i in range(nr_index): + main_index.append({ + 'section_idx' : int(var[3*(i)]), + 'offset' : int(var[3*(i)+1]), + 'block_l' : int(var[3*(i)+2] % 
2**32), + 'section_l' : round(var[3*(i)+2]/(2**32))}) + + next_index_pointer = read_as_list(fid, + [('next_index_pointer', 'uint64')]) + current_index = current_index + (i + 1) + + self.main_index = main_index + self.all_section_ids = [entry['section_idx'] for entry in main_index] + + def _read_dynamic_packets(self): + dynamic_packets = [] + [dynamic_packets_instace] = self._get_index_instances(id_str = 'InfoChangeStream') + offset = dynamic_packets_instace['offset'] + self.n_dynamic_packets = int(dynamic_packets_instace['section_l']/48) + with open(self.filepath, "rb") as fid: + fid.seek(offset) + for i in range(self.n_dynamic_packets): + guid_offset = offset + (i+1)*48 + + dynamic_packet_structure = [ + ('guid_list', 'uint8', 16), + ('date', 'float64'), + ('datefrace', 'float64'), + ('internal_offset_start', 'uint64'), + ('packet_size', 'uint64')] + + dynamic_packet = read_as_dict(fid, + dynamic_packet_structure) + guid_as_str = _convert_to_guid(dynamic_packet['guid_list']) + + + if guid_as_str in list(self.TAGS_DICT.keys()): + id_str = self.TAGS_DICT[guid_as_str] + else: + id_str = 'UNKNOWN' + + dynamic_packet['offset'] = int(guid_offset) + dynamic_packet['guid'] = guid_as_str.replace('-', '').replace('{', '').replace('}', '') + dynamic_packet['guid_as_str'] = guid_as_str + dynamic_packet['id_str'] = id_str + + dynamic_packets.append(dynamic_packet) + self.dynamic_packets = dynamic_packets + + def _get_dynamic_packets_data(self): + #TODO: Try to merge into _read_dynamic_packets + with open(self.filepath, "rb") as fid: + for i in range(self.n_dynamic_packets): + data = [] + dynamic_packet_instances = self._get_index_instances(tag = self.dynamic_packets[i]['guid_as_str']) + internal_offset = 0 + remaining_data_to_read = int(self.dynamic_packets[i]['packet_size']) + current_target_start = int(self.dynamic_packets[i]['internal_offset_start']) + for j in range(len(dynamic_packet_instances)): + current_instance = dynamic_packet_instances[j] + if ((internal_offset <= (current_target_start)) + & ((internal_offset + current_instance['section_l']) >= current_target_start)): + + start_at = current_target_start + stop_at = min(start_at + remaining_data_to_read, + internal_offset + current_instance['section_l']) + read_length = stop_at - start_at + + file_pos_start = current_instance['offset'] + start_at - internal_offset + fid.seek(int(file_pos_start)) + data_part = read_as_list(fid, + [('data', 'uint8', read_length)]) + data = data + list(data_part) + + remaining_data_to_read = remaining_data_to_read - read_length + current_target_start = current_target_start + read_length + + internal_offset = internal_offset + current_instance['section_l'] + + self.dynamic_packets[i]['data'] = np.array(data) + + def _get_patient_guid(self): + [idx_instance] = self._get_index_instances(id_str = 'PATIENTINFOGUID') + patient_info_structure = [ + ('guid', 'uint8', 16), + ('l_section', 'uint64'), + ('n_values', 'uint64'), + ('n_bstr', 'uint64')] + with open(self.filepath, "rb") as fid: + fid.seek(idx_instance['offset']) + patient_info = read_as_dict(fid, + patient_info_structure + ) + + for i in range(patient_info['n_values']): + id_temp = read_as_list(fid, + [('value', 'uint64')]) + + if id_temp in [7, 8]: + value = read_as_list(fid, + [('value', 'float64')]) + value = _convert_to_date(value) + elif id_temp in [23, 24]: + value = read_as_list(fid, + [('value', 'float64')]) + else: + value = 0 + patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value + + + str_setup = read_as_list(fid, + [('setup', 'uint64', 
int(patient_info['n_bstr']*2))]) + + for i in range(0, int(patient_info['n_bstr']*2), 2): + id_temp = str_setup[i] + value = ''.join([read_as_list(fid, + [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() + patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value + pass + + self.patient_info = patient_info + + def _get_signal_properties(self): + signal_properties = [] + signal_structure_segment = [ + ('guid', 'uint8', 16), + ('name', 'S1', self.ITEMNAMESIZE)] + + idx_instances = self._get_index_instances('SIGNALINFOGUID') + for instance in idx_instances: + with open(self.filepath, "rb") as fid: + fid.seek(instance['offset']) + signal_structure = read_as_dict(fid, + signal_structure_segment) + unknown = read_as_list(fid, + [('unknown', 'S1', 152)]) + fid.seek(512,1) + n_idx = read_as_dict(fid, + [('n_idx', 'uint16'), + ('misc1', 'uint16', 3)]) + + for i in range(n_idx['n_idx']): + + signal_properties_segment = [ + ('name', 'S2', self.LABELSIZE), + ('transducer', 'S2', self.UNITSIZE), + ('guid', 'uint8', 16), + ('bipolar', 'uint32'), + ('ac', 'uint32'), + ('high_filter', 'uint32'), + ('color', 'uint32'), + + ] + + properties = read_as_dict(fid, + signal_properties_segment) + #TODO: Consider setting On and Biploar to T/F + + signal_properties.append(properties) + reserved = read_as_list(fid, + [('reserved', 'S1', 256)]) + + self.signal_structure = signal_structure + self.signal_properties = signal_properties + pass + + def _get_channel_info(self): + channel_properties = [] + channel_structure_structure= [ + [('guid', 'uint8', 16), + ('name', 'S1', self.ITEMNAMESIZE)], + [('reserved', 'uint8', 16), + ('device_id', 'uint8', 16)] + ] + + [idx_instance] = self._get_index_instances('CHANNELGUID') + + with open(self.filepath, "rb") as fid: + fid.seek(idx_instance['offset']) + channel_structure = read_as_dict(fid, + channel_structure_structure[0]) + fid.seek(152, 1) + channel_structure = channel_structure | read_as_dict(fid, + channel_structure_structure[1]) + fid.seek(488,1) + n_index = read_as_list(fid, + [('n_index', 'int32', 2)]) + + current_index = 0 + for i in range(n_index[1]): + + channel_properties_structure = [ + ('sensor', 'S2', self.LABELSIZE), + ('sampling_rate', 'float64'), + ('on', 'uint32'), + ('l_input_id', 'uint32'), + ('l_input_setting_id', 'uint32')] + + info = read_as_dict(fid, + channel_properties_structure) + fid.seek(128, 1) + + if info['on']: + index_id = current_index + current_index += 1 + else: + index_id = -1 + + info['index_id'] = index_id + + channel_properties.append(info) + + reserved = read_as_list(fid, + [('reserved', 'S1', 4)]) + + self.channel_structure = channel_structure + self.channel_properties = channel_properties + + def _get_ts_properties(self, ts_packet_index = 0): + ts_properties = [] + + ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] + l_ts_packets = len(ts_packets) + self.ts_packets = ts_packets + if l_ts_packets > 0: + #TODO: Add support for multiple TS-Info packages + if l_ts_packets > 1: + warnings.warn(f'{l_ts_packets} TSinfo packets detected; using first instance for all segments. 
See documentation for info') + ts_packet = ts_packets[ts_packet_index] + elems = _typecast(ts_packet['data'][752:756])[0] + alloc = _typecast(ts_packet['data'][756:760])[0] + offset = 760 + + for i in range(elems): + internal_offset = 0 + top_range = (offset + self.TSLABELSIZE) + + label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) + + + internal_offset += 2*self.TSLABELSIZE + top_range = offset + internal_offset + self.LABELSIZE + active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + internal_offset = internal_offset + self.TSLABELSIZE; + top_range = offset + internal_offset + 8 + ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + internal_offset += 64; + + low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + resolution, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + mark, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + notch, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + eeg_offset, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + offset += 552 + ts_properties.append({ + 'label' : label, + 'active_sensor' : active_sensor, + 'ref_sensor' : ref_sensor, + 'low_cut' : low_cut, + 'high_cut' : high_cut, + 'sampling_rate' : sampling_rate, + 'resolution' : resolution, + 'notch' : notch, + 'mark' : mark, + 'eeg_offset' : eeg_offset}) + + self.ts_properties = ts_properties + pass + + + def _get_segment_start_times(self): + segments_properties = [] + + [segment_instance] = self._get_index_instances('SegmentStream') + n_segments = int(segment_instance['section_l']/152) + + with open(self.filepath, "rb") as fid: + + fid.seek(segment_instance['offset'], 0) + + for i in range(n_segments): + segment_info = {} + segment_info['date_ole'] = read_as_list(fid, + [('date', 'float64')]) + fid.seek(8,1) + segment_info['duration'] = read_as_list(fid, + [('duration', 'float64')]) + print(segment_info['duration']) + fid.seek(128, 1) + segment_info['ch_names'] = [info['label'] for info in self.ts_properties] + segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] + segment_info['sampling_rates'] = [info['sampling_rate'] for info in self.ts_properties] + segment_info['scale'] = [info['resolution'] for info in self.ts_properties] + + date_str = datetime.fromtimestamp(segment_info['date_ole']*self.SEC_PER_DAY - self.UNIX_TIME_CONVERSION) + start_date = date_str.date() + start_time = date_str.time() + segment_info['date'] = date_str + segment_info['start_date'] = date_str.date() + segment_info['start_time'] = date_str.time() + segment_info['duration'] = timedelta(seconds = segment_info['duration']) + segments_properties.append(segment_info) + self.segments_properties = segments_properties + pass + + def _get_events(self): + events = [] + + event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' + + event_instances = self._get_index_instances(tag = 'Events') + for instance in event_instances: + offset = instance['offset'] + with open(self.filepath, "rb") as fid: + pkt_structure = [ + ('guid', 
'uint8', 16), + ('len', 'uint64', 1)] + fid.seek(offset) + pkt = read_as_dict(fid, + pkt_structure) + pkt['guid'] = _convert_to_guid(pkt['guid']) + n_events = 0 + + + while (pkt['guid'] == event_packet_guid): + event_structure = [ + [('date_ole', 'float64'), + ('date_fraction', 'float64'), + ('duration', 'float64')], + [('user', 'S2', 12), + ('text_length', 'uint64'), + ('guid', 'uint8', 16)], + [('label', 'S2', 32)] + ] + + n_events += 1 + fid.seek(8, 1) + event = read_as_dict(fid, + event_structure[0]) + fid.seek(48, 1) + event = event | read_as_dict(fid, + event_structure[1]) + fid.seek(16, 1) + event = event | read_as_dict(fid, + event_structure[2]) + + event['date'] = datetime.fromtimestamp(event['date_ole']*self.SEC_PER_DAY + event['date_fraction'] - self.UNIX_TIME_CONVERSION) + event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds() + event['guid'] = _convert_to_guid(event['guid']) + + try: + id_str = self.HC_EVENT[event['guid']] + except: + id_str = 'UNKNOWN' + if id_str == 'Annotation': + fid.seek(31, 1) + annotation = read_as_list(fid, + [('annotation', 'S2', event['text_length'])]) + else: + annotation = '' + event['id_str'] = id_str + event['annotation'] = annotation + + event['block_index'] = 0 + seg_index = 0 + segment_time_range = [segment['date'] for segment in self.segments_properties] + for segment_time in segment_time_range[1:]: + if segment_time < event['date']: + seg_index += 1 + event['seg_index'] = seg_index + events.append(event) + + event['type'] = '0' if event['duration'] == 0 else '1' + + offset += int(pkt['len']) + fid.seek(offset) + pkt = read_as_dict(fid, + pkt_structure) + pkt['guid'] = _convert_to_guid(pkt['guid']) + + self.events = events + pass + + + + def _get_montage(self): + montages = [] + + montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') + with open(self.filepath, "rb") as fid: + + montage_info_structure = [ + [('name', 'S2', 32)], + [('n_derivations', 'uint32'), + ('n_derivations_2', 'uint32')], + [('derivation_name', 'S2', 64), + ('signal_name_1', 'S2', 32), + ('signal_name_2', 'S2', 32)] + ] + fid.seek(int(montage_instances[0]['offset']) + 40) + montage_info = read_as_dict(fid, + montage_info_structure[0]) + fid.seek(640, 1) + montage_info = montage_info | read_as_dict(fid, + montage_info_structure[1]) + + for i in range(montage_info['n_derivations']): + montage = montage_info | read_as_dict(fid, + montage_info_structure[2]) + fid.seek(264, 1) + + montages.append(montage) + display_instances = self._get_index_instances(id_str = 'DISPLAYGUID') + + display_structure = [[ + ('name', 'S2', 32)], + [('n_traces', 'uint32'), + ('n_traces_2', 'uint32')], + [('color', 'uint32')] + ] + fid.seek(int(display_instances[0]['offset']) + 40) + display = read_as_dict(fid, + display_structure[0]) + fid.seek(640, 1) + display = display | read_as_dict(fid, + display_structure[1]) + + if display['n_traces'] == montage_info['n_derivations']: + for i in range(display['n_traces']): + fid.seek(32, 1) + montages[i]['disp_name'] = display['name'] + montages[i]['color'] = read_as_list(fid, + display_structure[2]) + else: + print('Could not match montage derivations with display color table') + + self.montages = montages + self.display = display + + def get_nr_samples(self, block_index = 0, seg_index = 0): + try: + duration = self.segments_properties[seg_index]['duration'].total_seconds() + return([int(sampling_rate * duration) for sampling_rate in self.segments_properties[seg_index]['sampling_rates']]) + except 
IndexError as error: + print(str(error) + ': Incorrect segment argument; seg_index must be an integer representing segment index, starting from 0.') + pass + + + def _get_raw_signal(self): + earliest_signal_index = [tag['tag'] for tag in self.tags].index('0') + offset = [index['offset'] for index in self.main_index if index['section_idx'] == earliest_signal_index][0] + + raw_signal = np.memmap(self.filepath, dtype="i2", offset = offset, mode="r") + self.signal_data_offset = offset + self.raw_signal = raw_signal + + def _extract_header_information(self): + self._get_tags() + self._get_qi() + self._get_main_index() + self._read_dynamic_packets() + self._get_dynamic_packets_data() + self._get_patient_guid() + self._get_signal_properties() + self._get_channel_info() + self._get_ts_properties() + self._get_segment_start_times() + self._get_events() + self._get_montage() + self._get_raw_signal() + + + + def _create_signal_channels(self, dtype): + ''' + + + _signal_channel_dtype = [ + ("name", "U64"), # not necessarily unique -> sig, ch, ts + ("id", "U64"), # must be unique -> (sig), ch, (ts) + ("sampling_rate", "float64") -> - , ch, ts + ("dtype", "U16"), -> given (int16) + ("units", "U64"), -> sig, -, - + ("gain", "float64"), -, - , ts + ("offset", "float64"), -, -, ts + ("stream_id", "U64"), + ] + + + ''' + signal_channels = [] + for channel in self.channel_properties: + signal = next((item for item in self.signal_properties if item["name"] == channel['sensor']), None) + timestream = next((item for item in self.ts_properties if item["label"].split('-')[0] == channel['sensor']), None) + if signal is None or timestream is None: + continue + signal_channels.append(( + channel['sensor'], + channel['l_input_id'], + channel['sampling_rate'], + 'int16', + signal['transducer'], + timestream['resolution'], + timestream['eeg_offset'], + 0)) + + return np.array(signal_channels, dtype = dtype) + + def _create_signal_channels_no_channel_props(self, dtype): + ''' + + + _signal_channel_dtype = [ + ("name", "U64"), # not necessarily unique -> sig, ts (occasionaly bad names in ts) + ("id", "U64"), # must be unique -> Enumerate + ("sampling_rate", "float64") -> - , ts + ("dtype", "U16"), -> given (int16) + ("units", "U64"), -> sig, - + ("gain", "float64"), -, ts + ("offset", "float64"), -, ts + ("stream_id", "U64"), + ] + + + ''' + #TODO: Verify that using timestreams make sense by comparing a bunch of them + + signal_channels = [] + + + for i, timestream in enumerate(self.ts_properties): + signal = next((item for item in self.signal_properties if item["name"] == timestream['label'].split('-')[0]), None) + if signal is None: + continue + + signal_channels.append(( + timestream['label'].split('-')[0], + i, + int(timestream['sampling_rate']), + 'int16', + signal['transducer'], + timestream['resolution'], + timestream['eeg_offset'], + 0)) + + return np.array(signal_channels, dtype = dtype) + + def _generate_additional_annotations(self): + for block_index in range(self.header['nb_block']): + bl_annotations = self.raw_annotations["blocks"][block_index] + bl_annotations['date'] = self.segments_properties[0]['date'] + try: + bl_annotations['firstname'] = self.patient_info['firstName'] + bl_annotations['surname'] = self.patient_info['lastName'] + except KeyError: + bl_annotations['name'] = self.patient_info['altID'] + bl_annotations['duration'] = sum([properties['duration'].total_seconds() for properties in self.segments_properties]) + for i, seg_annotations in enumerate(bl_annotations['segments']): + try: + 
seg_annotations['firstname'] = self.patient_info['firstName'] + seg_annotations['surname'] = self.patient_info['lastName'] + except KeyError: + seg_annotations['name'] = self.patient_info['altID'] + seg_annotations['date'] = self.segments_properties[i]['date'] + seg_annotations['duration'] = self.segments_properties[i]['duration'].total_seconds() + for event_types in seg_annotations['events']: + event_types['__array_annotations__']['nb_events'] = len([event for event in self.events + if event['seg_index'] == i and event['type'] == event_types['id']]) + + + + + + + def _get_analogsignal_chunk( + self, + block_index: int = 0, + seg_index: int = 0, + i_start: int = None, + i_stop: int = None, + stream_index: int = None, + channel_indexes: np.ndarray | list | slice = None, + ): + + + + if block_index >= self.header['nb_block']: + raise IndexError(f"Block Index out of range. There are {self.header['nb_block']} blocks in the file") + + if seg_index >= self.header['nb_segment'][block_index]: + raise IndexError(f"Segment Index out of range. There are {self.header['nb_segment'][block_index]} segments for block {block_index}") + + + + + if channel_indexes is None: + channel_indexes = list(range(0,len(self.ts_properties))) + nb_chan = len(channel_indexes) + elif isinstance(channel_indexes, slice): + channel_indexes = np.arange(len(self.ts_properties), dtype="int")[channel_indexes] + nb_chan = len(channel_indexes) + else: + channel_indexes = np.asarray(channel_indexes) + if any(channel_indexes < 0): + raise IndexError("Channel Indices cannot be negative") + if any(channel_indexes >= len(self.ts_properties)): + raise IndexError("Channel Indices out of range") + nb_chan = len(channel_indexes) + + if i_start is None: + i_start = 0 + if i_stop is None: + i_stop = max(self.get_nr_samples(seg_index)) + if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index)): + # some checks + raise IndexError("Start or Stop Index out of bounds") + + eeg_sampling_rate = max(self.segments_properties[seg_index]['sampling_rates']) + + + cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) + + data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(eeg_sampling_rate)]) + + for i in channel_indexes: + print('Current Channel: ' + str(i)) + + current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] + multiplicator = self.segments_properties[seg_index]['scale'][i] + + if current_samplingrate != eeg_sampling_rate: + continue + + [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] + all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] + section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] + cum_section_lengths = [0] + list(np.cumsum(section_lengths)) + skip_values = cum_segment_duration[seg_index] * current_samplingrate + + first_section_for_seg = _get_relevant_section(cum_section_lengths, skip_values) - 1 + last_section_for_seg = _get_relevant_section(cum_section_lengths, + current_samplingrate* + self.segments_properties[seg_index]['duration'].total_seconds()) - 1 + first_section_for_seg + offset_section_lengths = [length - cum_section_lengths[first_section_for_seg] for length in cum_section_lengths] + first_section = _get_relevant_section(cum_section_lengths, i_start - 1) + last_section = _get_relevant_section(cum_section_lengths, i_stop + 1) - 1 + if last_section > last_section_for_seg: + 
raise IndexError(f'Index out of range for channel {i}')
+
+            use_sections = all_sections[first_section:last_section]
+            use_sections_length = section_lengths[first_section:last_section]
+
+            np_idx = 0
+            for section_idx, section_length in zip(use_sections, use_sections_length):
+                cur_sec = self.main_index[section_idx]
+                # offsets are byte positions; the raw signal is int16, i.e. two bytes per sample
+                start = int((cur_sec['offset'] - self.signal_data_offset)/2)
+                stop = start + section_length
+                data[np_idx:(np_idx + section_length), i] = multiplicator*self.raw_signal[slice(start, stop)]
+                np_idx += section_length
+
+        return data
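For reference, once parse_header() has run, chunks are normally pulled through the public BaseRawIO wrappers rather than by calling _get_analogsignal_chunk directly. A minimal sketch of that pattern follows (the file path is hypothetical). One thing to double-check in review: _get_analogsignal_chunk already multiplies the int16 samples by the per-channel scale, while the header also advertises resolution as the channel gain, so a later rescale_signal_raw_to_float would appear to apply the scaling twice.

    from neo.rawio.nicoletrawio import NicoletRawIO

    reader = NicoletRawIO(r'C:\temp\example.e')  # hypothetical path
    reader.parse_header()
    sampling_rate = reader.get_signal_sampling_rate(stream_index=0)
    # first ten seconds of the first two channels of block 0, segment 0
    chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                          i_start=0, i_stop=int(10 * sampling_rate),
                                          stream_index=0, channel_indexes=[0, 1])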
+    def _segment_t_start(self, block_index: int, seg_index: int):
+        # must return a float scaled in seconds; this t_start is shared by all
+        # objects in the segment except AnalogSignal
+        all_starts = []
+        for bl_index in range(self.header['nb_block']):
+            bl_annotation = self.raw_annotations["blocks"][bl_index]
+            block_starts = [0]
+            start_time = 0
+            for seg_annotation in bl_annotation['segments'][1:]:
+                start_time += seg_annotation['duration']
+                block_starts.append(float(start_time))
+            all_starts.append(block_starts)
+        return all_starts[block_index][seg_index]
+
+    def _segment_t_stop(self, block_index: int, seg_index: int):
+        # must return a float scaled in seconds
+        all_stops = []
+        for bl_index in range(self.header['nb_block']):
+            bl_annotation = self.raw_annotations["blocks"][bl_index]
+            block_stops = []
+            stop_time = 0
+            for seg_annotation in bl_annotation['segments']:
+                stop_time += seg_annotation['duration']
+                block_stops.append(float(stop_time))
+            all_stops.append(block_stops)
+        return all_stops[block_index][seg_index]
+
+    def _get_signal_size(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0):
+        # must return an int: the number of samples in this segment.
+        # All channels kept in the stream share the EEG sampling rate, so the
+        # maximum over channels equals the common per-channel sample count.
+        return max(self.get_nr_samples(block_index = block_index,
+                                       seg_index = seg_index))
+
+    def _get_signal_t_start(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0):
+        #TODO: Find out if this means the start of the signal with respect to the segment, with respect to the block, or with respect to the entire file
+        # This gives the t_start of a signal; it is often, but not always,
+        # equal to _segment_t_start. It must return a float scaled in seconds.
+        return self._segment_t_start(block_index, seg_index)
+
+    def _spike_count(self, block_index: int, seg_index: int, spike_channel_index: int):
+        # spike data is not read from .e files, so the count is always zero
+        return 0
+
+    def _get_spike_timestamps(
+        self, block_index: int, seg_index: int, spike_channel_index: int, t_start: float | None, t_stop: float | None
+    ):
+        return None
+
+    def _rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype):
+        # must rescale a particular spike_timestamps to seconds, with a fixed
+        # dtype so the user can choose the precision they want
+        return None
+
+    def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0):
+        return self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][event_channel_index]['__array_annotations__']['nb_events']
+
+    def _get_event_timestamps(
+        self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0, t_start: float = None, t_stop: float = None
+    ):
+        events = [event for event in self.events if event['type'] == str(event_channel_index) and event['seg_index'] == seg_index]
+
+        timestamp = np.array([event['timestamp'] for event in events], dtype="float64")
+        durations = np.array([event['duration'] for event in events], dtype="float64")
+        labels = np.array([event['id_str'] for event in events], dtype="U12") #TODO: Check if using id_str makes sense
+
+        if t_start is not None:
+            keep = timestamp >= t_start
+            timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep]
+
+        if t_stop is not None:
+            keep = timestamp <= t_stop
+            timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep]
+
+        if event_channel_index == 0:
+            # channel 0 holds plain events, which have no duration
+            durations = None
+
+        return timestamp, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps: np.ndarray, dtype: np.dtype, event_channel_index: int):
+        event_times = event_timestamps.astype(dtype)
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration: np.ndarray, dtype: np.dtype, event_channel_index: int):
+        durations = raw_duration.astype(dtype)
+        return durations
+
+    def _get_index_instances(self, id_str = '', tag = ''):
+        identifier = 'id_str'
+        if tag:
+            identifier = 'tag'
+            id_str = tag
+        info_idx = [entry[identifier] for entry in self.tags].index(id_str)
+        matching_idx = [entry['section_idx'] == info_idx for entry in self.main_index]
+        idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match]
+        return(idx_instance)
+
+#%%
+
+
+def read_as_dict(fid, dtype):
+    info = dict()
+    dt = np.dtype(dtype)
+    h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
+    for k in dt.names:
+        v = h[k]
+        v = _process_bytes(v, dt[k])
+        info[k] = v
+    return info
+
+def read_as_list(fid, dtype):
+    dt = np.dtype(dtype)
+    h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0]
+    h = _process_bytes(h, dt[0])
+    return h
+
+def _process_bytes(byte_data, data_type):
+    is_list_of_binaries = isinstance(byte_data, np.ndarray) and isinstance(byte_data[0], np.bytes_)
+    byte_obj = b''.join(byte_data) if is_list_of_binaries else byte_data
+    bytes_decoded = _decode_string(byte_obj) if data_type.kind == "S" or is_list_of_binaries else byte_obj
+    return bytes_decoded
+
+def _decode_string(string):
+    try:
+        string = string.decode("utf8")
+    except UnicodeDecodeError:
+        string = string.decode('latin_1')
+    string = string.replace("\x03", "")
+    string = string.replace("\x00", "")
+    return string
+
+def _convert_to_guid(byte_values,
+                     guid_format = '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'):
+    # the first three groups of a GUID are stored little-endian on disk,
+    # which is why the format string reverses their byte order
+    hex_strings = [f'{nr:x}'.upper().rjust(2, '0') for nr in byte_values]
+    return('{' + guid_format.format(*hex_strings) + '}')
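A worked check of the byte reordering against a known tag (the event-packet GUID from TAGS_DICT), useful when verifying the reader against a hex dump; the byte values below are derived from that GUID, not taken from a real file:

    # bytes as they would appear on disk for the EVENTGUID tag
    on_disk = [0x80, 0xF6, 0x99, 0xB7, 0xA4, 0x72, 0xD3, 0x11,
               0x93, 0xD3, 0x00, 0x50, 0x04, 0x00, 0xC1, 0x48]
    assert _convert_to_guid(on_disk) == '{B799F680-72A4-11D3-93D3-00500400C148}'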
+def _convert_to_date(data_float, origin = '31-12-1899'):
+    return(datetime.strptime(origin, '%d-%m-%Y')
+           + timedelta(days = int(data_float)))
+
+def _typecast(data, dtype_in = np.uint8, dtype_out = np.uint32):
+    data = np.array(data, dtype = dtype_in)
+    return(data.view(dtype_out))
+
+def _transform_ts_properties(data, dtype):
+    cast_list = list(_typecast(data, dtype_out = dtype))
+    if dtype == np.float64:
+        [cast_list] = cast_list
+        return(cast_list)
+    else:
+        return(_transform_char(cast_list))
+
+def _transform_char(line):
+    if not isinstance(line, list):
+        line = [line]
+    line = ''.join([chr(item) for item in line if chr(item) != '\x00'])
+    return line
+
+def _read_ts_properties(data, offset, internal_offset, dtype):
+    offset_modifier = 8 if (dtype == np.float64) else 2
+
+    top_range = offset + internal_offset + offset_modifier
+    value = _transform_ts_properties(data[(offset + internal_offset):top_range], dtype)
+    internal_offset += offset_modifier
+    return(value, internal_offset)
+
+def _get_relevant_section(lengths_list, to_compare):
+    try:
+        segment = min([j for j, length in enumerate(lengths_list) if length > to_compare])
+    except ValueError:
+        segment = len(lengths_list)
+    return(segment)
+
+
+
+
+if __name__ == '__main__':
+
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
+    #file = NicoletRawIO(r'C:\temp\3407_2.e')
+    file = NicoletRawIO(r'C:\temp\3407_2.e')
+    #file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e')
+    file._parse_header()
+    tags = file.tags
+    qi = file.qi
+    main_index = file.main_index
+    dynamic_packets = file.dynamic_packets
+
+    pat_info = file.patient_info
+    signal_structure = file.signal_structure
+    signal_properties = file.signal_properties
+
+    channel_structure = file.channel_structure
+    channel_properties = file.channel_properties
+
+    ts_properties = file.ts_properties
+    ts_packets = file.ts_packets
+
+    segments_properties = file.segments_properties
+
+    events = file.events
+
+    montages = file.montages
+
+    raw_signal = file.raw_signal
+    print('reading data')
+    data = file._get_analogsignal_chunk()
+
+    header = file.header
+
+    raw_annotations = file.raw_annotations
+
+
+    #Construct an mne object
+
+#%%
+
+
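With patch 01 in place the reader is usable end to end, either through the __main__ block above or through the thin NicoletIO wrapper from neo.io. A minimal sketch of the intended high-level path (the file path is hypothetical):

    from neo.io.nicoletio import NicoletIO

    io = NicoletIO(r'C:\temp\example.e')  # hypothetical path
    block = io.read_block(lazy=False)     # BaseFromRaw assembles the Block/Segment tree
    for seg in block.segments:
        print(seg.t_start, seg.t_stop, len(seg.analogsignals), len(seg.events))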
From cce4b69386d7de811ee41aea0b400eb5f7f09bdc Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Mon, 5 Aug 2024 09:15:44 +0200
Subject: [PATCH 02/63] Add Reader for all TS-Info

---
 neo/rawio/nicoletrawio.py | 75 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 71 insertions(+), 4 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index b465cd9dc..26a1bb67f 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -428,7 +428,60 @@ def _get_channel_info(self):
 
         self.channel_structure = channel_structure
         self.channel_properties = channel_properties
+
+    def _get_ts_properties_all(self):
+        ts_packets_properties = []
+
+        ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID']
+        l_ts_packets = len(ts_packets)
+
+        for ts_packet in ts_packets:
+            ts_properties = []
+            #TODO: Add support for multiple TS-Info packages
+            elems = _typecast(ts_packet['data'][752:756])[0]
+            alloc = _typecast(ts_packet['data'][756:760])[0]
+            offset = 760
+
+            for i in range(elems):
+                internal_offset = 0
+                top_range = (offset + self.TSLABELSIZE)
+
+                label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16)
+
+
+                internal_offset += 2*self.TSLABELSIZE
+                top_range = offset + internal_offset + self.LABELSIZE
+                active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16)
+                internal_offset = internal_offset + self.TSLABELSIZE;
+                top_range = offset + internal_offset + 8
+                ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16)
+                internal_offset += 64;
+                low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64)
+                high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64)
+                sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64)
+                resolution, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64)
+                mark, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16)
+                notch, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16)
+                eeg_offset, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64)
+                offset += 552
+                ts_properties.append({
+                    'label' : label,
+                    'active_sensor' : active_sensor,
+                    'ref_sensor' : ref_sensor,
+                    'low_cut' : low_cut,
+                    'high_cut' : high_cut,
+                    'sampling_rate' : sampling_rate,
+                    'resolution' : resolution,
+                    'notch' : notch,
+                    'mark' : mark,
+                    'eeg_offset' : eeg_offset})
+
+            ts_packets_properties.append(ts_properties)
+        self.ts_packets = ts_packets
+        self.ts_packets_properties = ts_packets_properties
+        pass
+
     def _get_ts_properties(self, ts_packet_index = 0):
         ts_properties = []
 
@@ -1079,8 +1132,11 @@ def _get_relevant_section(lengths_list, to_compare):
     #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
     #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
     #file = NicoletRawIO(r'C:\temp\3407_2.e')
-    file = NicoletRawIO(r'C:\temp\3407_2.e')
-    #file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e')
+    #file = NicoletRawIO(r'C:\temp\7280.e')
+
+
+
+    file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e')
     file._parse_header()
     tags = file.tags
     qi = file.qi
     main_index = file.main_index
     dynamic_packets = file.dynamic_packets
@@ -1105,7 +1161,7 @@
 
     raw_signal = file.raw_signal
     print('reading data')
-    data = file._get_analogsignal_chunk()
+    #data = file._get_analogsignal_chunk()
 
     header = file.header
 
@@ -1114,6 +1170,17 @@
 
     #Construct an mne object
 
+    #Compare ts packages
+    #TODO: Find differences in TS_Packets, and make the use of all the ts_packages in case there are any differences
+
+    file._get_ts_properties_all()
+    for i, ts1 in enumerate(file.ts_packets_properties):
+        for j, ts2 in enumerate(file.ts_packets_properties):
+            pairs = zip(ts1, ts2)
+            print(f'Comparing ts{i} to ts{j}:')
+            print(f'Any mismatches: {any(x != y for x, y in pairs)}')
+
+    [datetime.fromtimestamp(entry['date']*file.SEC_PER_DAY - file.UNIX_TIME_CONVERSION) for entry in file.ts_packets]
+
 #%%
-
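The recurring date arithmetic (date * SEC_PER_DAY - UNIX_TIME_CONVERSION, used again in the list comprehension above) converts timestamps stored as OLE automation dates, i.e. days since 1899-12-30, into Unix time: 2209161600 s / 86400 s per day = 25569 days, exactly the offset between the two epochs. A self-contained sketch with an illustrative value; note that the reader itself calls datetime.fromtimestamp without a timezone and therefore applies the local zone (see the TODO about UTC in patch 01):

    from datetime import datetime, timezone

    SEC_PER_DAY = 86400
    UNIX_TIME_CONVERSION = 2209161600  # 25569 days between 1899-12-30 and 1970-01-01

    date_ole = 45000.25  # illustrative: days (plus fraction of a day) since 1899-12-30
    unix_seconds = date_ole * SEC_PER_DAY - UNIX_TIME_CONVERSION
    print(datetime.fromtimestamp(unix_seconds, tz=timezone.utc))  # pinned to UTC here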
From 7d7d40981d69ccfa048d6f0460439bc6ba497f36 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Mon, 5 Aug 2024 10:41:54 +0200
Subject: [PATCH 03/63] Add functions to go through all e files

---
 neo/rawio/nicoletrawio.py | 91 ++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 44 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index 26a1bb67f..4e0853c89 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -12,6 +12,7 @@
 from __future__ import annotations
 
 import numpy as np
+import pandas as pd
 import warnings
 from datetime import datetime, timedelta
 from pathlib import Path
@@ -553,7 +554,6 @@ def _get_segment_start_times(self):
             fid.seek(8,1)
             segment_info['duration'] = read_as_list(fid,
                 [('duration', 'float64')])
-            print(segment_info['duration'])
             fid.seek(128, 1)
             segment_info['ch_names'] = [info['label'] for info in self.ts_properties]
             segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties]
@@ -1123,7 +1123,38 @@ def _get_relevant_section(lengths_list, to_compare):
         segment = len(lengths_list)
     return(segment)
 
+
+
+def get_ts_info_mismatches():
+    nic_filepaths = get_file_list()
+    mismatch_list = [[filepath, compare_ts_info(NicoletRawIO(filepath))] for filepath in nic_filepaths]
+    return pd.DataFrame(mismatch_list, columns = ['filename', 'status'])
+
+def compare_ts_info(file):
+    mismatch = 'no_mismatch'
+    print(f'Comparing file {file.filepath}')
+    try:
+        file._parse_header()
+        file._get_ts_properties_all()
+    except Exception:
+        return 'not_readable'
+
+    for i, ts1 in enumerate(file.ts_packets_properties):
+        for j, ts2 in enumerate(file.ts_packets_properties):
+            pairs = zip(ts1, ts2)
+            if any(x != y for x, y in pairs):
+                mismatch = 'mismatch'
+
+    return mismatch
+
+
+
+def get_file_list():
+    from glob import glob
+    root = r'\\fsnph01\NPH_Archiv\LTM'
+
+    return glob('\\'.join([root, '**', '**' , '*.e']))
 
 if __name__ == '__main__':
@@ -1132,55 +1163,27 @@
     #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
     #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
     #file = NicoletRawIO(r'C:\temp\3407_2.e')
-    #file = NicoletRawIO(r'C:\temp\7280.e')
-
-
-
-    file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e')
+    file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0193\20568\Patient3_ABLEIT25_t2.e')
+    '''
     file._parse_header()
-    tags = file.tags
-    qi = file.qi
-    main_index = file.main_index
-    dynamic_packets = file.dynamic_packets
-
-    pat_info = file.patient_info
-    signal_structure = file.signal_structure
-    signal_properties = file.signal_properties
-
-    channel_structure = file.channel_structure
-    channel_properties = file.channel_properties
-
-    ts_properties = file.ts_properties
-    ts_packets = file.ts_packets
-
-    segments_properties = file.segments_properties
-
-    events = file.events
-
-    montages = file.montages
-
-    raw_signal = file.raw_signal
-    print('reading data')
-    #data = file._get_analogsignal_chunk()
-
-    header = file.header
-
-    raw_annotations = file.raw_annotations
-
-
-    #Construct an mne object
-
-    #Compare ts packages
-    #TODO: Find differences in TS_Packets, and make the use of all the ts_packages in case there are any differences
-
-    file._get_ts_properties_all()
+    file._get_ts_properties_all()
+    ts_packets = file.ts_packets_properties
     for i, ts1 in enumerate(file.ts_packets_properties):
         for j, ts2 in enumerate(file.ts_packets_properties):
             pairs = zip(ts1, ts2)
-            print(f'Comparing ts{i} to ts{j}:')
-            print(f'Any mismatches: {any(x != y for x, y in pairs)}')
-
-    [datetime.fromtimestamp(entry['date']*file.SEC_PER_DAY - file.UNIX_TIME_CONVERSION) for entry in file.ts_packets]
+            if any(x != y for x, y in pairs):
+                print(f'Mismatch between {i} and {j}')
+    '''
+
+    mismatch_df = get_ts_info_mismatches()
+    mismatch_df.columns = ['filename', 'status']
+    mismatched_ts_info = mismatch_df[mismatch_df.status == 'mismatch']
+    not_read = mismatch_df[mismatch_df.status == 'not_readable']
+    mismatch_df.to_csv(r'C:\temp\tsinfo_mismatches_lzm193.csv')
+    #TODO: Find differences in TS_Packets, and categorise them
+    #Then, check if they make a difference
+    #temp = glob('\\'.join([root, '**', '**' , '*.e'])) ca 55k files
 
 #%%
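The TODO above asks for categorising the differences between TS-info packets. A minimal sketch of that step, assuming the ts_packets_properties structure built by _get_ts_properties_all (a list of per-packet lists of property dicts); the function name is hypothetical:

    def diff_ts_properties(ts1, ts2):
        # map channel label -> list of property names that differ between two packets
        diffs = {}
        for props_a, props_b in zip(ts1, ts2):
            changed = [key for key in props_a if props_a[key] != props_b.get(key)]
            if changed:
                diffs[props_a['label']] = changed
        return diffs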
From 67a08f9111a417445b47346c60782d683e07bd34 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Mon, 9 Sep 2024 07:58:03 +0200
Subject: [PATCH 04/63] Add test.py

---
 neo/test/rawiotest/test_nicoletrawio.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 neo/test/rawiotest/test_nicoletrawio.py

diff --git a/neo/test/rawiotest/test_nicoletrawio.py b/neo/test/rawiotest/test_nicoletrawio.py
new file mode 100644
index 000000000..c7b0b8ba8
--- /dev/null
+++ b/neo/test/rawiotest/test_nicoletrawio.py
@@ -0,0 +1,22 @@
+"""
+Tests of neo.rawio.nicoletrawio
+"""
+
+import unittest
+
+from neo.rawio.nicoletrawio import NicoletRawIO
+
+from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
+
+
+class TestNicoletRawIO(
+    BaseTestRawIO,
+    unittest.TestCase,
+):
+    rawioclass = NicoletRawIO
+    entities_to_download = ["nicolet"]
+    entities_to_test = ["nicolet/File_nicolet_1.TRC"]
+
+
+if __name__ == "__main__":
+    unittest.main()

From b826a25d7b3b34d59de5c4cb8d7b947836fcfd49 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Mon, 9 Sep 2024 07:59:26 +0200
Subject: [PATCH 05/63] Add test.py

---
 neo/test/iotest/test_nicoletio.py | 111 ++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)
 create mode 100644 neo/test/iotest/test_nicoletio.py

diff --git a/neo/test/iotest/test_nicoletio.py b/neo/test/iotest/test_nicoletio.py
new file mode 100644
index 000000000..d5ef79251
--- /dev/null
+++ b/neo/test/iotest/test_nicoletio.py
@@ -0,0 +1,111 @@
+"""
+Tests of neo.io.nicoletio
+"""
+
+import pathlib
+import unittest
+
+from neo.io.nicoletio import NicoletIO
+from neo.test.iotest.common_io_test import BaseTestIO
+from neo.test.iotest.tools import get_test_file_full_path
+from neo.io.proxyobjects import AnalogSignalProxy, SpikeTrainProxy, EventProxy, EpochProxy
+from neo import AnalogSignal, SpikeTrain
+
+import quantities as pq
+import numpy as np
+
+
+# This runs the standard tests; these are mandatory for all IOs
+class TestNicoletIO(
+    BaseTestIO,
+    unittest.TestCase,
+):
+    ioclass = NicoletIO
+    entities_to_download = ['nicolet']
+    entities_to_test = ["nicolet/File_nicolet_1.TRC"]
+
+    def setUp(self):
+        super().setUp()
+        # ensure fake test files exist before running common tests
+        for entity in self.entities_to_test:
+            full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
+            pathlib.Path(full_path).touch()
+
+    def tearDown(self) -> None:
+        super().tearDown()
+        for entity in self.entities_to_test:
+            full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
+            pathlib.Path(full_path).unlink(missing_ok=True)
+
+
+# These are the minimal variables required to run the common
+# IO tests. IO-specific tests can be added here and will be run
+# automatically in addition to the common tests.
+class Specific_NicoletIO(unittest.TestCase):
+    def test_read_segment_lazy(self):
+        r = NicoletIO(filepath=None)
+        seg = r.read_segment(lazy=True)
+        for ana in seg.analogsignals:
+            assert isinstance(ana, AnalogSignalProxy)
+            ana = ana.load()
+            assert isinstance(ana, AnalogSignal)
+        for st in seg.spiketrains:
+            assert isinstance(st, SpikeTrainProxy)
+            st = st.load()
+            assert isinstance(st, SpikeTrain)
+
+        seg = r.read_segment(lazy=False)
+        for anasig in seg.analogsignals:
+            assert isinstance(anasig, AnalogSignal)
+            self.assertNotEqual(anasig.size, 0)
+        for st in seg.spiketrains:
+            assert isinstance(st, SpikeTrain)
+            self.assertNotEqual(st.size, 0)
+
+        # annotations
+        assert "seg_extra_info" in seg.annotations
+        assert seg.name == "Seg #0 Block #0"
+        for anasig in seg.analogsignals:
+            assert anasig.name is not None
+        for st in seg.spiketrains:
+            assert st.name is not None
+        for ev in seg.events:
+            assert ev.name is not None
+        for ep in seg.epochs:
+            assert ep.name is not None
+
+    def test_read_block(self):
+        r = NicoletIO(filepath=None)
+        bl = r.read_block(lazy=True)
+        # assert len(bl.list_units) == 3
+        # assert len(bl.channel_indexes) == 1 + 1  # signals grouped + units grouped
+
+    def test_read_segment_with_time_slice(self):
+        r = NicoletIO(filepath=None)
+        seg = r.read_segment(time_slice=None)
+        shape_full = seg.analogsignals[0].shape
+        spikes_full = seg.spiketrains[0]
+        event_full = seg.events[0]
+
+        t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
+        seg = r.read_segment(time_slice=(t_start, t_stop))
+        shape_slice = seg.analogsignals[0].shape
+        spikes_slice = seg.spiketrains[0]
+        event_slice = seg.events[0]
+
+        assert shape_full[0] > shape_slice[0]
+
+        assert spikes_full.size > spikes_slice.size
+        assert np.all(spikes_slice >= t_start)
+        assert np.all(spikes_slice <= t_stop)
+        assert spikes_slice.t_start == t_start
+        assert spikes_slice.t_stop == t_stop
+
+        assert event_full.size > event_slice.size
+        assert np.all(event_slice.times >= t_start)
+        assert np.all(event_slice.times <= t_stop)
+
+
+if __name__ == "__main__":
+    unittest.main()

From 30393b7cf6414c919ee1f6f848f31687f7e32036 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Fri, 13 Sep 2024 10:31:59 +0200
Subject: [PATCH 06/63] Remove Comments and Reduce Blank Lines

---
 neo/rawio/nicoletrawio.py | 342 +++++---------------------------------
 1 file changed, 40 insertions(+), 302 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index 4e0853c89..d6ffc2aa9 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -5,8 +5,6 @@
 The original authors of the MATLAB implementation are Joost Wagenaar, Cristian Donos, Jan Brogger and Callum Stewart.
 
 Author: Murezi Capaul
-
-CHECK IF THIS CHANGE IS ALSO ON THE MAIN BRANCH AND DEV BRANCH
 """
 
 from __future__ import annotations
@@ -47,7 +45,7 @@ class NicoletRawIO(BaseRawIO):
     TSLABELSIZE = 64
     UNITSIZE = 16
     ITEMNAMESIZE = 64
-    UNIX_TIME_CONVERSION = 2209161600 #TODO: Currently, time is always UTC. 
Find where to read timezones + UNIX_TIME_CONVERSION = 2209161600 SEC_PER_DAY = 86400 TAGS_DICT = { @@ -103,7 +101,6 @@ class NicoletRawIO(BaseRawIO): 'city','state','country','language','height','weight','race','religion', 'maritalStatus'] - #TODO: Find more translations for events guids -> Open file in Nicolet Viewer and here, and compare names of events HC_EVENT = { '{A5A95612-A7F8-11CF-831A-0800091B5BDA}' : 'Annotation', '{A5A95646-A7F8-11CF-831A-0800091B5BDA}' : 'Seizure', @@ -126,26 +123,24 @@ class NicoletRawIO(BaseRawIO): '{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}' : 'Toilet', '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode'} - def __init__(self, filepath = ""): BaseRawIO.__init__(self) self.filepath = Path(filepath) def _source_name(self): return self.filepath - def _parse_header(self): self._extract_header_information() self.header = {} - self.header["nb_block"] = 1 #TODO: Find out if multiple blocks exist + self.header["nb_block"] = 1 self.header["nb_segment"] = [len(self.segments_properties)] - self.header["signal_streams"] = np.array([("Signals", "0")], #TODO: Consider implementing all recorded channels after finding out if they make sense + self.header["signal_streams"] = np.array([("Signals", "0")], dtype=_signal_stream_dtype) - self.header["signal_channels"] = self._create_signal_channels(dtype = _signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) - self.header["spike_channels"] = np.array([], #TODO: Find if there is automatic spike detection + self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) + self.header["spike_channels"] = np.array([], dtype= _spike_channel_dtype) - self.header["event_channels"] = np.array([("Events", "0", "event"), #TODO: Find if there are more types of events that can be identified + self.header["event_channels"] = np.array([("Events", "0", "event"), ("Epochs", "1", "epoch")], dtype = _event_channel_dtype) self._generate_minimal_annotations() @@ -153,31 +148,29 @@ def _parse_header(self): def _get_tags(self): misc_structure = [ - ('misc1', 'uint32', 5), - #('unknown', 'uint32'), - #('index_idx', 'uint32') + ('misc1', 'uint32', 5) ] tags_structure = [ ('tag', 'S80'), ('index', 'uint32')] with open(self.filepath, "rb") as fid: + index_idx = read_as_dict(fid, misc_structure) unknown = read_as_dict(fid, [('unknown', 'uint32', 1)]) + fid.seek(172) n_tags = read_as_list(fid, [('n_tags', 'uint32')]) tags = [read_as_dict(fid, tags_structure) for _ in range(n_tags)] - for entry in tags: try: entry['id_str'] = self.TAGS_DICT[entry['tag']] except KeyError: - entry['id_str'] = 'UNKNOWN' - + entry['id_str'] = 'UNKNOWN' self.n_tags = n_tags self.index_idx = index_idx self.tags = tags @@ -198,8 +191,6 @@ def _get_qi(self): self.qi = qi def _get_main_index(self): - #TODO: Find file with multiple index pointers to test the while loop - #TODO: Find out what multiple block lengths mean main_index = [] current_index = 0 next_index_pointer = self.qi['index_idx'] @@ -211,22 +202,25 @@ def _get_main_index(self): ) var = read_as_list(fid, [('var', 'uint64', int(3*nr_index))]) - for i in range(nr_index): main_index.append({ 'section_idx' : int(var[3*(i)]), 'offset' : int(var[3*(i)+1]), 'block_l' : int(var[3*(i)+2] % 2**32), 'section_l' : round(var[3*(i)+2]/(2**32))}) - next_index_pointer = read_as_list(fid, [('next_index_pointer', 'uint64')]) current_index = 
current_index + (i + 1) - self.main_index = main_index self.all_section_ids = [entry['section_idx'] for entry in main_index] def _read_dynamic_packets(self): + dynamic_packet_structure = [ + ('guid_list', 'uint8', 16), + ('date', 'float64'), + ('datefrace', 'float64'), + ('internal_offset_start', 'uint64'), + ('packet_size', 'uint64')] dynamic_packets = [] [dynamic_packets_instace] = self._get_index_instances(id_str = 'InfoChangeStream') offset = dynamic_packets_instace['offset'] @@ -235,34 +229,21 @@ def _read_dynamic_packets(self): fid.seek(offset) for i in range(self.n_dynamic_packets): guid_offset = offset + (i+1)*48 - - dynamic_packet_structure = [ - ('guid_list', 'uint8', 16), - ('date', 'float64'), - ('datefrace', 'float64'), - ('internal_offset_start', 'uint64'), - ('packet_size', 'uint64')] - dynamic_packet = read_as_dict(fid, dynamic_packet_structure) guid_as_str = _convert_to_guid(dynamic_packet['guid_list']) - - if guid_as_str in list(self.TAGS_DICT.keys()): id_str = self.TAGS_DICT[guid_as_str] else: - id_str = 'UNKNOWN' - + id_str = 'UNKNOWN' dynamic_packet['offset'] = int(guid_offset) dynamic_packet['guid'] = guid_as_str.replace('-', '').replace('{', '').replace('}', '') dynamic_packet['guid_as_str'] = guid_as_str dynamic_packet['id_str'] = id_str - dynamic_packets.append(dynamic_packet) self.dynamic_packets = dynamic_packets - + def _get_dynamic_packets_data(self): - #TODO: Try to merge into _read_dynamic_packets with open(self.filepath, "rb") as fid: for i in range(self.n_dynamic_packets): data = [] @@ -274,23 +255,18 @@ def _get_dynamic_packets_data(self): current_instance = dynamic_packet_instances[j] if ((internal_offset <= (current_target_start)) & ((internal_offset + current_instance['section_l']) >= current_target_start)): - start_at = current_target_start stop_at = min(start_at + remaining_data_to_read, internal_offset + current_instance['section_l']) read_length = stop_at - start_at - file_pos_start = current_instance['offset'] + start_at - internal_offset fid.seek(int(file_pos_start)) data_part = read_as_list(fid, [('data', 'uint8', read_length)]) data = data + list(data_part) - remaining_data_to_read = remaining_data_to_read - read_length current_target_start = current_target_start + read_length - - internal_offset = internal_offset + current_instance['section_l'] - + internal_offset = internal_offset + current_instance['section_l'] self.dynamic_packets[i]['data'] = np.array(data) def _get_patient_guid(self): @@ -305,11 +281,9 @@ def _get_patient_guid(self): patient_info = read_as_dict(fid, patient_info_structure ) - for i in range(patient_info['n_values']): id_temp = read_as_list(fid, [('value', 'uint64')]) - if id_temp in [7, 8]: value = read_as_list(fid, [('value', 'float64')]) @@ -320,26 +294,30 @@ def _get_patient_guid(self): else: value = 0 patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - - str_setup = read_as_list(fid, [('setup', 'uint64', int(patient_info['n_bstr']*2))]) - for i in range(0, int(patient_info['n_bstr']*2), 2): id_temp = str_setup[i] value = ''.join([read_as_list(fid, [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - pass - + pass #TODO: Find out if removing 'pass' makes any difference self.patient_info = patient_info def _get_signal_properties(self): + signal_properties_segment = [ + ('name', 'S2', self.LABELSIZE), + ('transducer', 'S2', self.UNITSIZE), + ('guid', 'uint8', 16), + ('bipolar', 'uint32'), + ('ac', 'uint32'), + ('high_filter', 'uint32'), + 
('color', 'uint32'), + ] signal_properties = [] signal_structure_segment = [ ('guid', 'uint8', 16), ('name', 'S1', self.ITEMNAMESIZE)] - idx_instances = self._get_index_instances('SIGNALINFOGUID') for instance in idx_instances: with open(self.filepath, "rb") as fid: @@ -352,28 +330,12 @@ def _get_signal_properties(self): n_idx = read_as_dict(fid, [('n_idx', 'uint16'), ('misc1', 'uint16', 3)]) - for i in range(n_idx['n_idx']): - - signal_properties_segment = [ - ('name', 'S2', self.LABELSIZE), - ('transducer', 'S2', self.UNITSIZE), - ('guid', 'uint8', 16), - ('bipolar', 'uint32'), - ('ac', 'uint32'), - ('high_filter', 'uint32'), - ('color', 'uint32'), - - ] - properties = read_as_dict(fid, signal_properties_segment) - #TODO: Consider setting On and Biploar to T/F - signal_properties.append(properties) reserved = read_as_list(fid, [('reserved', 'S1', 256)]) - self.signal_structure = signal_structure self.signal_properties = signal_properties pass @@ -386,9 +348,7 @@ def _get_channel_info(self): [('reserved', 'uint8', 16), ('device_id', 'uint8', 16)] ] - [idx_instance] = self._get_index_instances('CHANNELGUID') - with open(self.filepath, "rb") as fid: fid.seek(idx_instance['offset']) channel_structure = read_as_dict(fid, @@ -399,57 +359,42 @@ def _get_channel_info(self): fid.seek(488,1) n_index = read_as_list(fid, [('n_index', 'int32', 2)]) - current_index = 0 for i in range(n_index[1]): - channel_properties_structure = [ ('sensor', 'S2', self.LABELSIZE), ('sampling_rate', 'float64'), ('on', 'uint32'), ('l_input_id', 'uint32'), ('l_input_setting_id', 'uint32')] - info = read_as_dict(fid, channel_properties_structure) fid.seek(128, 1) - if info['on']: index_id = current_index current_index += 1 else: index_id = -1 - info['index_id'] = index_id - channel_properties.append(info) - reserved = read_as_list(fid, [('reserved', 'S1', 4)]) - self.channel_structure = channel_structure self.channel_properties = channel_properties def _get_ts_properties_all(self): ts_packets_properties = [] - ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] l_ts_packets = len(ts_packets) - for ts_packet in ts_packets: ts_properties = [] - #TODO: Add support for multiple TS-Info packages elems = _typecast(ts_packet['data'][752:756])[0] alloc = _typecast(ts_packet['data'][756:760])[0] offset = 760 - for i in range(elems): internal_offset = 0 top_range = (offset + self.TSLABELSIZE) - label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) - - internal_offset += 2*self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) @@ -457,7 +402,6 @@ def _get_ts_properties_all(self): top_range = offset + internal_offset + 8 ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset += 64; - low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) @@ -477,7 +421,6 @@ def _get_ts_properties_all(self): 'notch' : notch, 'mark' : mark, 'eeg_offset' : eeg_offset}) - ts_packets_properties.append(ts_properties) self.ts_packets = ts_packets self.ts_packets_properties = ts_packets_properties @@ -485,26 +428,20 @@ def 
_get_ts_properties_all(self): def _get_ts_properties(self, ts_packet_index = 0): ts_properties = [] - ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] l_ts_packets = len(ts_packets) self.ts_packets = ts_packets if l_ts_packets > 0: - #TODO: Add support for multiple TS-Info packages if l_ts_packets > 1: warnings.warn(f'{l_ts_packets} TSinfo packets detected; using first instance for all segments. See documentation for info') ts_packet = ts_packets[ts_packet_index] elems = _typecast(ts_packet['data'][752:756])[0] alloc = _typecast(ts_packet['data'][756:760])[0] offset = 760 - for i in range(elems): internal_offset = 0 top_range = (offset + self.TSLABELSIZE) - label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) - - internal_offset += 2*self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) @@ -512,7 +449,6 @@ def _get_ts_properties(self, ts_packet_index = 0): top_range = offset + internal_offset + 8 ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset += 64; - low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) @@ -532,21 +468,14 @@ def _get_ts_properties(self, ts_packet_index = 0): 'notch' : notch, 'mark' : mark, 'eeg_offset' : eeg_offset}) - self.ts_properties = ts_properties - pass - def _get_segment_start_times(self): segments_properties = [] - [segment_instance] = self._get_index_instances('SegmentStream') n_segments = int(segment_instance['section_l']/152) - with open(self.filepath, "rb") as fid: - fid.seek(segment_instance['offset'], 0) - for i in range(n_segments): segment_info = {} segment_info['date_ole'] = read_as_list(fid, @@ -559,7 +488,6 @@ def _get_segment_start_times(self): segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] segment_info['sampling_rates'] = [info['sampling_rate'] for info in self.ts_properties] segment_info['scale'] = [info['resolution'] for info in self.ts_properties] - date_str = datetime.fromtimestamp(segment_info['date_ole']*self.SEC_PER_DAY - self.UNIX_TIME_CONVERSION) start_date = date_str.date() start_time = date_str.time() @@ -569,13 +497,10 @@ def _get_segment_start_times(self): segment_info['duration'] = timedelta(seconds = segment_info['duration']) segments_properties.append(segment_info) self.segments_properties = segments_properties - pass def _get_events(self): events = [] - event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' - event_instances = self._get_index_instances(tag = 'Events') for instance in event_instances: offset = instance['offset'] @@ -588,8 +513,6 @@ def _get_events(self): pkt_structure) pkt['guid'] = _convert_to_guid(pkt['guid']) n_events = 0 - - while (pkt['guid'] == event_packet_guid): event_structure = [ [('date_ole', 'float64'), @@ -600,7 +523,6 @@ def _get_events(self): ('guid', 'uint8', 16)], [('label', 'S2', 32)] ] - n_events += 1 fid.seek(8, 1) event = read_as_dict(fid, @@ -611,11 +533,9 @@ def _get_events(self): fid.seek(16, 1) event = event | read_as_dict(fid, event_structure[2]) - event['date'] = datetime.fromtimestamp(event['date_ole']*self.SEC_PER_DAY + 
event['date_fraction'] - self.UNIX_TIME_CONVERSION) event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds() event['guid'] = _convert_to_guid(event['guid']) - try: id_str = self.HC_EVENT[event['guid']] except: @@ -628,7 +548,6 @@ def _get_events(self): annotation = '' event['id_str'] = id_str event['annotation'] = annotation - event['block_index'] = 0 seg_index = 0 segment_time_range = [segment['date'] for segment in self.segments_properties] @@ -637,15 +556,12 @@ def _get_events(self): seg_index += 1 event['seg_index'] = seg_index events.append(event) - event['type'] = '0' if event['duration'] == 0 else '1' - offset += int(pkt['len']) fid.seek(offset) pkt = read_as_dict(fid, pkt_structure) pkt['guid'] = _convert_to_guid(pkt['guid']) - self.events = events pass @@ -653,10 +569,8 @@ def _get_events(self): def _get_montage(self): montages = [] - montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') with open(self.filepath, "rb") as fid: - montage_info_structure = [ [('name', 'S2', 32)], [('n_derivations', 'uint32'), @@ -676,10 +590,8 @@ def _get_montage(self): montage = montage_info | read_as_dict(fid, montage_info_structure[2]) fid.seek(264, 1) - montages.append(montage) display_instances = self._get_index_instances(id_str = 'DISPLAYGUID') - display_structure = [[ ('name', 'S2', 32)], [('n_traces', 'uint32'), @@ -692,7 +604,6 @@ def _get_montage(self): fid.seek(640, 1) display = display | read_as_dict(fid, display_structure[1]) - if display['n_traces'] == montage_info['n_derivations']: for i in range(display['n_traces']): fid.seek(32, 1) @@ -701,7 +612,6 @@ def _get_montage(self): display_structure[2]) else: print('Could not match montage derivations with display color table') - self.montages = montages self.display = display @@ -713,7 +623,6 @@ def get_nr_samples(self, block_index = 0, seg_index = 0): print(str(error) + ': Incorrect segment argument; seg_index must be an integer representing segment index, starting from 0.') pass - def _get_raw_signal(self): earliest_signal_index = [tag['tag'] for tag in self.tags].index('0') offset = [index['offset'] for index in self.main_index if index['section_idx'] == earliest_signal_index][0] @@ -737,25 +646,7 @@ def _extract_header_information(self): self._get_montage() self._get_raw_signal() - - def _create_signal_channels(self, dtype): - ''' - - - _signal_channel_dtype = [ - ("name", "U64"), # not necessarily unique -> sig, ch, ts - ("id", "U64"), # must be unique -> (sig), ch, (ts) - ("sampling_rate", "float64") -> - , ch, ts - ("dtype", "U16"), -> given (int16) - ("units", "U64"), -> sig, -, - - ("gain", "float64"), -, - , ts - ("offset", "float64"), -, -, ts - ("stream_id", "U64"), - ] - - - ''' signal_channels = [] for channel in self.channel_properties: signal = next((item for item in self.signal_properties if item["name"] == channel['sensor']), None) @@ -771,36 +662,14 @@ def _create_signal_channels(self, dtype): timestream['resolution'], timestream['eeg_offset'], 0)) - return np.array(signal_channels, dtype = dtype) def _create_signal_channels_no_channel_props(self, dtype): - ''' - - - _signal_channel_dtype = [ - ("name", "U64"), # not necessarily unique -> sig, ts (occasionaly bad names in ts) - ("id", "U64"), # must be unique -> Enumerate - ("sampling_rate", "float64") -> - , ts - ("dtype", "U16"), -> given (int16) - ("units", "U64"), -> sig, - - ("gain", "float64"), -, ts - ("offset", "float64"), -, ts - ("stream_id", "U64"), - ] - - - ''' - #TODO: Verify that using timestreams make 
sense by comparing a bunch of them - signal_channels = [] - - for i, timestream in enumerate(self.ts_properties): signal = next((item for item in self.signal_properties if item["name"] == timestream['label'].split('-')[0]), None) if signal is None: continue - signal_channels.append(( timestream['label'].split('-')[0], i, @@ -810,7 +679,6 @@ def _create_signal_channels_no_channel_props(self, dtype): timestream['resolution'], timestream['eeg_offset'], 0)) - return np.array(signal_channels, dtype = dtype) def _generate_additional_annotations(self): @@ -834,33 +702,18 @@ def _generate_additional_annotations(self): for event_types in seg_annotations['events']: event_types['__array_annotations__']['nb_events'] = len([event for event in self.events if event['seg_index'] == i and event['type'] == event_types['id']]) - - - - - - def _get_analogsignal_chunk( - self, - block_index: int = 0, - seg_index: int = 0, - i_start: int = None, - i_stop: int = None, - stream_index: int = None, - channel_indexes: np.ndarray | list | slice = None, - ): - - - + def _get_analogsignal_chunk(self, + block_index: int = 0, + seg_index: int = 0, + i_start: int = None, + i_stop: int = None, + stream_index: int = None, + channel_indexes: np.ndarray | list | slice = None): if block_index >= self.header['nb_block']: raise IndexError(f"Block Index out of range. There are {self.header['nb_block']} blocks in the file") - if seg_index >= self.header['nb_segment'][block_index]: raise IndexError(f"Segment Index out of range. There are {self.header['nb_segment'][block_index]} segments for block {block_index}") - - - - if channel_indexes is None: channel_indexes = list(range(0,len(self.ts_properties))) nb_chan = len(channel_indexes) @@ -874,37 +727,26 @@ def _get_analogsignal_chunk( if any(channel_indexes >= len(self.ts_properties)): raise IndexError("Channel Indices out of range") nb_chan = len(channel_indexes) - if i_start is None: i_start = 0 if i_stop is None: i_stop = max(self.get_nr_samples(seg_index)) if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index)): - # some checks raise IndexError("Start or Stop Index out of bounds") - eeg_sampling_rate = max(self.segments_properties[seg_index]['sampling_rates']) - - - cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) - + cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(eeg_sampling_rate)]) - for i in channel_indexes: print('Current Channel: ' + str(i)) - current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] - if current_samplingrate != eeg_sampling_rate: continue - [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] cum_section_lengths = [0] + list(np.cumsum(section_lengths)) skip_values = cum_segment_duration[seg_index] * current_samplingrate - first_section_for_seg = _get_relevant_section(cum_section_lengths, skip_values) - 1 last_section_for_seg = _get_relevant_section(cum_section_lengths, current_samplingrate* @@ -914,16 +756,12 @@ def _get_analogsignal_chunk( last_section = _get_relevant_section(cum_section_lengths, i_stop + 1) - 
1 if last_section > last_section_for_seg: raise IndexError(f'Index out of range for channel {i}') - use_sections = all_sections[first_section:last_section] use_sections_length = section_lengths[first_section:last_section] - - np_idx = 0 for section_idx, section_length in zip(use_sections, use_sections_length): cur_sec = self.main_index[section_idx] - - start = int((cur_sec['offset'] - self.signal_data_offset)/2) #It appears that the offset was set for a file double the size + start = int((cur_sec['offset'] - self.signal_data_offset)/2) stop = start + section_length data[np_idx:(np_idx + section_length), i] = multiplicator*self.raw_signal[slice(start, stop)] np_idx += section_length @@ -931,9 +769,6 @@ def _get_analogsignal_chunk( return data def _segment_t_start(self, block_index: int, seg_index: int): - # this must return a float scaled in seconds - # this t_start will be shared by all objects in the segment - # except AnalogSignal all_starts = [] for block_index in range(self.header['nb_block']): bl_annotation = self.raw_annotations["blocks"][block_index] @@ -947,7 +782,6 @@ def _segment_t_start(self, block_index: int, seg_index: int): def _segment_t_stop(self, block_index: int, seg_index: int): - # this must return a float scaled in seconds all_stops = [] for block_index in range(self.header['nb_block']): bl_annotation = self.raw_annotations["blocks"][block_index] @@ -960,35 +794,13 @@ def _segment_t_stop(self, block_index: int, seg_index: int): return all_stops[block_index][seg_index] def _get_signal_size(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): - # We generate fake data in which the two stream signals have the same shape - # across all segments (10.0 seconds) - # This is not the case for real data, instead you should return the signal - # size depending on the block_index and segment_index - # this must return an int = the number of samples - - # Note that channel_indexes can be ignored for most cases - # except for the case of several sampling rates. return max(self.get_nr_samples(block_index = block_index, seg_index = seg_index)) def _get_signal_t_start(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): - #TODO: Find out if this means the start of the signal with respect to the segment, with respect to the block, or with respect to the entire file - # This give the t_start of a signal. - # Very often this is equal to _segment_t_start but not - # always. - # this must return a float scaled in seconds - - # Note that channel_indexes can be ignored for most cases - # except for the case of several sampling rates. - - # Here this is the same. - # this is not always the case return self._segment_t_start(block_index, seg_index) def _spike_count(self, block_index: int, seg_index: int, spike_channel_index: int): - # Must return the nb of spikes for given (block_index, seg_index, spike_channel_index) - # we are lucky: our units have all the same nb of spikes!! - # it is not always the case return 0 def _get_spike_timestamps( @@ -997,8 +809,6 @@ def _get_spike_timestamps( return None def _rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype): - # must rescale to seconds, a particular spike_timestamps - # with a fixed dtype so the user can choose the precision they want. 
return None def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0): @@ -1008,24 +818,18 @@ def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_i def _get_event_timestamps( self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0, t_start: float = None, t_stop: float = None ): - events = [event for event in self.events if event['type'] == str(event_channel_index) and event['seg_index'] == seg_index] - timestamp = np.array([event['timestamp'] for event in events], dtype="float64") durations = np.array([event['duration'] for event in events], dtype="float64") labels = np.array([event['id_str'] for event in events], dtype="U12") #TODO: Check if using id_str makes sense - if t_start is not None: keep = timestamp >= t_start timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep] - if t_stop is not None: keep = timestamp <= t_stop timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep] - if seg_index == '0': durations = None - return timestamp, durations, labels def _rescale_event_timestamp(self, event_timestamps: np.ndarray, dtype: np.dtype, event_channel_index: int): @@ -1046,9 +850,6 @@ def _get_index_instances(self, id_str = '', tag = ''): idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match] return(idx_instance) -#%% - - def read_as_dict(fid, dtype): info = dict() dt = np.dtype(dtype) @@ -1060,8 +861,6 @@ def read_as_dict(fid, dtype): return info def read_as_list(fid, dtype): - #if offset is not None: - # fid.seek(offset) dt = np.dtype(dtype) h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0] h = _process_bytes(h, dt[0]) @@ -1110,7 +909,6 @@ def _transform_char(line): def _read_ts_properties(data, offset, internal_offset, dtype): offset_modifier = 8 if (dtype == np.float64) else 2 - top_range = offset + internal_offset + offset_modifier value = _transform_ts_properties(data[(offset + internal_offset):top_range], dtype) internal_offset += offset_modifier @@ -1123,67 +921,7 @@ def _get_relevant_section(lengths_list, to_compare): segment = len(lengths_list) return(segment) - - -def get_ts_info_mismatches(): - nic_filepaths = get_file_list() - mismatch_list = [[filepath, compare_ts_info(NicoletRawIO(filepath))] for filepath in nic_filepaths] - - return pd.DataFrame(mismatch_list, columns = ['filename', 'status']) - -def compare_ts_info(file): - mismatch = 'no_mismatch' - print(f'Comparing file {file.filepath}') - try: - file._parse_header() - file._get_ts_properties_all() - except: - return 'not_readable' - - for i, ts1 in enumerate(file.ts_packets_properties): - for j, ts2 in enumerate(file.ts_packets_properties): - pairs = zip(ts1, ts2) - if any(x != y for x, y in pairs): - mismatch = 'mismatch' - - return mismatch - - - -def get_file_list(): - from glob import glob - root = r'\\fsnph01\NPH_Archiv\LTM' - - return glob('\\'.join([root, '**', '**' , '*.e'])) - - if __name__ == '__main__': - - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e') - #file = NicoletRawIO(r'C:\temp\3407_2.e') - - file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0193\20568\Patient3_ABLEIT25_t2.e') - ''' + file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e') file._parse_header() - file._get_ts_properties_all() - ts_packets = 
file.ts_packets_properties - for i, ts1 in enumerate(file.ts_packets_properties): - for j, ts2 in enumerate(file.ts_packets_properties): - pairs = zip(ts1, ts2) - if any(x != y for x, y in pairs): - print(f'Mismatch between {i} and {j}') - ''' - - mismatch_df = get_ts_info_mismatches() - mismatch_df.columns = ['filename', 'status'] - mismatched_ts_info = mismatch_df[mismatch_df.status == True] - not_read = mismatch_df[mismatch_df.status == 'not_readable'] - mismatch_df.to_csv(r'C:\temp\tsinfo_mismatches_lzm193.csv') - #TODO: Find differences in TS_Packets, and categorise them - #Then, check if they make a difference - #temp = glob('\\'.join([root, '**', '**' , '*.e'])) ca 55k files - -#%% - + file._get_analogsignal_chunk() \ No newline at end of file From c8054b860c1ddae9da3e5979ddd5ed5ae1a02b6a Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 13 Sep 2024 10:34:25 +0200 Subject: [PATCH 07/63] Remove unused datareads from _get_tags --- neo/rawio/nicoletrawio.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index d6ffc2aa9..22e150e46 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -147,20 +147,11 @@ def _parse_header(self): self._generate_additional_annotations() def _get_tags(self): - misc_structure = [ - ('misc1', 'uint32', 5) - ] tags_structure = [ ('tag', 'S80'), ('index', 'uint32')] with open(self.filepath, "rb") as fid: - - index_idx = read_as_dict(fid, - misc_structure) - unknown = read_as_dict(fid, - [('unknown', 'uint32', 1)]) - fid.seek(172) n_tags = read_as_list(fid, [('n_tags', 'uint32')]) @@ -172,7 +163,6 @@ def _get_tags(self): except KeyError: entry['id_str'] = 'UNKNOWN' self.n_tags = n_tags - self.index_idx = index_idx self.tags = tags def _get_qi(self): From 1e4d69dca0b42cf5902c6a13920b96f8cd9bf79f Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 13 Sep 2024 10:49:22 +0200 Subject: [PATCH 08/63] Add trailing comma to lists --- neo/rawio/nicoletrawio.py | 98 +++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 29 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 22e150e46..ba90205fb 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -92,14 +92,38 @@ class NicoletRawIO(BaseRawIO): '{291E2381-B3B4-44D1-BB77-8CF5C24420D7}' : 'GENERALSAMPLESGUID', '{5F11C628-FCCC-4FDD-B429-5EC94CB3AFEB}' : 'FILTERSAMPLESGUID', '{728087F8-73E1-44D1-8882-C770976478A2}' : 'DATEXDATAGUID', - '{35F356D9-0F1C-4DFE-8286-D3DB3346FD75}' : 'TESTINFOGUID'} + '{35F356D9-0F1C-4DFE-8286-D3DB3346FD75}' : 'TESTINFOGUID', + } INFO_PROPS = [ - 'patientID', 'firstName','middleName','lastName', - 'altID','mothersMaidenName','DOB','DOD','street','sexID','phone', - 'notes','dominance','siteID','suffix','prefix','degree','apartment', - 'city','state','country','language','height','weight','race','religion', - 'maritalStatus'] + 'patientID', + 'firstName', + 'middleName', + 'lastName', + 'altID', + 'mothersMaidenName', + 'DOB', + 'DOD', + 'street', + 'sexID', + 'phone', + 'notes', + 'dominance', + 'siteID', + 'suffix', + 'prefix', + 'degree', + 'apartment', + 'city', + 'state', + 'country', + 'language', + 'height', + 'weight', + 'race', + 'religion', + 'maritalStatus', + ] HC_EVENT = { '{A5A95612-A7F8-11CF-831A-0800091B5BDA}' : 'Annotation', @@ -121,7 +145,8 @@ class NicoletRawIO(BaseRawIO): '{BAE4550A-8409-4289-9D8A-0D571A206BEC}' : 'Eating', '{1F3A45A4-4D0F-4CC4-A43A-CAD2BC2D71F2}' : 'ECG', '{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}' : 
'Toilet', - '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode'} + '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode', + } def __init__(self, filepath = ""): BaseRawIO.__init__(self) @@ -149,7 +174,8 @@ def _parse_header(self): def _get_tags(self): tags_structure = [ ('tag', 'S80'), - ('index', 'uint32')] + ('index', 'uint32'), + ] with open(self.filepath, "rb") as fid: fid.seek(172) @@ -197,7 +223,8 @@ def _get_main_index(self): 'section_idx' : int(var[3*(i)]), 'offset' : int(var[3*(i)+1]), 'block_l' : int(var[3*(i)+2] % 2**32), - 'section_l' : round(var[3*(i)+2]/(2**32))}) + 'section_l' : round(var[3*(i)+2]/(2**32)), + }) next_index_pointer = read_as_list(fid, [('next_index_pointer', 'uint64')]) current_index = current_index + (i + 1) @@ -210,7 +237,8 @@ def _read_dynamic_packets(self): ('date', 'float64'), ('datefrace', 'float64'), ('internal_offset_start', 'uint64'), - ('packet_size', 'uint64')] + ('packet_size', 'uint64'), + ] dynamic_packets = [] [dynamic_packets_instace] = self._get_index_instances(id_str = 'InfoChangeStream') offset = dynamic_packets_instace['offset'] @@ -265,7 +293,8 @@ def _get_patient_guid(self): ('guid', 'uint8', 16), ('l_section', 'uint64'), ('n_values', 'uint64'), - ('n_bstr', 'uint64')] + ('n_bstr', 'uint64'), + ] with open(self.filepath, "rb") as fid: fid.seek(idx_instance['offset']) patient_info = read_as_dict(fid, @@ -291,7 +320,6 @@ def _get_patient_guid(self): value = ''.join([read_as_list(fid, [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - pass #TODO: Find out if removing 'pass' makes any difference self.patient_info = patient_info def _get_signal_properties(self): @@ -307,7 +335,8 @@ def _get_signal_properties(self): signal_properties = [] signal_structure_segment = [ ('guid', 'uint8', 16), - ('name', 'S1', self.ITEMNAMESIZE)] + ('name', 'S1', self.ITEMNAMESIZE), + ] idx_instances = self._get_index_instances('SIGNALINFOGUID') for instance in idx_instances: with open(self.filepath, "rb") as fid: @@ -334,9 +363,11 @@ def _get_channel_info(self): channel_properties = [] channel_structure_structure= [ [('guid', 'uint8', 16), - ('name', 'S1', self.ITEMNAMESIZE)], + ('name', 'S1', self.ITEMNAMESIZE), + ], [('reserved', 'uint8', 16), - ('device_id', 'uint8', 16)] + ('device_id', 'uint8', 16), + ], ] [idx_instance] = self._get_index_instances('CHANNELGUID') with open(self.filepath, "rb") as fid: @@ -356,7 +387,8 @@ def _get_channel_info(self): ('sampling_rate', 'float64'), ('on', 'uint32'), ('l_input_id', 'uint32'), - ('l_input_setting_id', 'uint32')] + ('l_input_setting_id', 'uint32'), + ] info = read_as_dict(fid, channel_properties_structure) fid.seek(128, 1) @@ -391,7 +423,7 @@ def _get_ts_properties_all(self): internal_offset = internal_offset + self.TSLABELSIZE; top_range = offset + internal_offset + 8 ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) - internal_offset += 64; + internal_offset += 64 low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) @@ -410,7 +442,8 @@ def _get_ts_properties_all(self): 'resolution' : resolution, 'notch' : notch, 'mark' : mark, - 'eeg_offset' : eeg_offset}) + 'eeg_offset' : eeg_offset, + }) 
ts_packets_properties.append(ts_properties) self.ts_packets = ts_packets self.ts_packets_properties = ts_packets_properties @@ -457,7 +490,8 @@ def _get_ts_properties(self, ts_packet_index = 0): 'resolution' : resolution, 'notch' : notch, 'mark' : mark, - 'eeg_offset' : eeg_offset}) + 'eeg_offset' : eeg_offset, + }) self.ts_properties = ts_properties def _get_segment_start_times(self): @@ -497,7 +531,8 @@ def _get_events(self): with open(self.filepath, "rb") as fid: pkt_structure = [ ('guid', 'uint8', 16), - ('len', 'uint64', 1)] + ('len', 'uint64', 1), + ] fid.seek(offset) pkt = read_as_dict(fid, pkt_structure) @@ -507,11 +542,13 @@ def _get_events(self): event_structure = [ [('date_ole', 'float64'), ('date_fraction', 'float64'), - ('duration', 'float64')], + ('duration', 'float64'), + ], [('user', 'S2', 12), ('text_length', 'uint64'), - ('guid', 'uint8', 16)], - [('label', 'S2', 32)] + ('guid', 'uint8', 16), + ], + [('label', 'S2', 32)], ] n_events += 1 fid.seek(8, 1) @@ -562,12 +599,15 @@ def _get_montage(self): montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') with open(self.filepath, "rb") as fid: montage_info_structure = [ - [('name', 'S2', 32)], + [('name', 'S2', 32), + ], [('n_derivations', 'uint32'), - ('n_derivations_2', 'uint32')], + ('n_derivations_2', 'uint32'), + ], [('derivation_name', 'S2', 64), ('signal_name_1', 'S2', 32), - ('signal_name_2', 'S2', 32)] + ('signal_name_2', 'S2', 32), + ], ] fid.seek(int(montage_instances[0]['offset']) + 40) montage_info = read_as_dict(fid, @@ -585,8 +625,9 @@ def _get_montage(self): display_structure = [[ ('name', 'S2', 32)], [('n_traces', 'uint32'), - ('n_traces_2', 'uint32')], - [('color', 'uint32')] + ('n_traces_2', 'uint32'), + ], + [('color', 'uint32')], ] fid.seek(int(display_instances[0]['offset']) + 40) display = read_as_dict(fid, @@ -770,7 +811,6 @@ def _segment_t_start(self, block_index: int, seg_index: int): all_starts.append(block_starts) return all_starts[block_index][seg_index] - def _segment_t_stop(self, block_index: int, seg_index: int): all_stops = [] for block_index in range(self.header['nb_block']): From 617d3737938df47af42dbd68d717163e47acc968 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 13 Sep 2024 12:25:46 +0200 Subject: [PATCH 09/63] Fixed dtype FutureWarning from 534 --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index ba90205fb..d0c8ee645 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -531,7 +531,7 @@ def _get_events(self): with open(self.filepath, "rb") as fid: pkt_structure = [ ('guid', 'uint8', 16), - ('len', 'uint64', 1), + ('len', 'uint64'), ] fid.seek(offset) pkt = read_as_dict(fid, From e488ebbfa1e121e7772a429aadb1970ce7c6df3e Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 17 Sep 2024 11:10:31 +0200 Subject: [PATCH 10/63] Removed Remaining TODOs --- neo/rawio/nicoletrawio.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index d0c8ee645..55b54dd3c 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -851,7 +851,7 @@ def _get_event_timestamps( events = [event for event in self.events if event['type'] == str(event_channel_index) and event['seg_index'] == seg_index] timestamp = np.array([event['timestamp'] for event in events], dtype="float64") durations = np.array([event['duration'] for event in events], dtype="float64") - labels = 
np.array([event['id_str'] for event in events], dtype="U12") #TODO: Check if using id_str makes sense + labels = np.array([event['id_str'] for event in events], dtype="U12") if t_start is not None: keep = timestamp >= t_start timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep] @@ -933,7 +933,7 @@ def _transform_ts_properties(data, dtype): return(_transform_char(cast_list)) def _transform_char(line): - if type(line) != list: line = [line] #TODO: Find a better way to do this + if type(line) != list: line = [line] line = ''.join([chr(item) for item in line if chr(item) != '\x00']) return line @@ -954,4 +954,5 @@ def _get_relevant_section(lengths_list, to_compare): if __name__ == '__main__': file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e') file._parse_header() - file._get_analogsignal_chunk() \ No newline at end of file + file._get_analogsignal_chunk() + From e233438c73060d7106a838ccac2e631360100370 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 17 Sep 2024 11:12:44 +0200 Subject: [PATCH 11/63] Removed name==main --- neo/rawio/nicoletrawio.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 55b54dd3c..0617ddf25 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -949,10 +949,4 @@ def _get_relevant_section(lengths_list, to_compare): segment = min([j for j, length in enumerate(lengths_list) if length > to_compare]) except ValueError: segment = len(lengths_list) - return(segment) - -if __name__ == '__main__': - file = NicoletRawIO(r'C:\temp\Patient20_ABLEIT53_t1.e') - file._parse_header() - file._get_analogsignal_chunk() - + return(segment) \ No newline at end of file From 74ac595d645be64b2d0b2b4b3ac433a134b3721f Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 18 Sep 2024 16:07:28 +0200 Subject: [PATCH 12/63] Add _convert_to_date to dynamic_packets and ts_info --- neo/rawio/nicoletrawio.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 0617ddf25..c16565d4c 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -249,6 +249,7 @@ def _read_dynamic_packets(self): guid_offset = offset + (i+1)*48 dynamic_packet = read_as_dict(fid, dynamic_packet_structure) + dynamic_packet['date'] = _convert_to_date(dynamic_packet['date']) guid_as_str = _convert_to_guid(dynamic_packet['guid_list']) if guid_as_str in list(self.TAGS_DICT.keys()): id_str = self.TAGS_DICT[guid_as_str] @@ -369,7 +370,7 @@ def _get_channel_info(self): ('device_id', 'uint8', 16), ], ] - [idx_instance] = self._get_index_instances('CHANNELGUID') + idx_instance = self._get_index_instances('CHANNELGUID')[0] with open(self.filepath, "rb") as fid: fid.seek(idx_instance['offset']) channel_structure = read_as_dict(fid, @@ -949,4 +950,4 @@ def _get_relevant_section(lengths_list, to_compare): segment = min([j for j, length in enumerate(lengths_list) if length > to_compare]) except ValueError: segment = len(lengths_list) - return(segment) \ No newline at end of file + return(segment) From 2a9aa44b67d11a78be0d6f784d3034967abd1bca Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 18 Sep 2024 16:09:24 +0200 Subject: [PATCH 13/63] Change get_index_instances for CHANNELGUID --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index c16565d4c..e7bbac07c 100644 --- a/neo/rawio/nicoletrawio.py +++ 
b/neo/rawio/nicoletrawio.py
@@ -370,7 +370,7 @@ def _get_channel_info(self):
                 ('device_id', 'uint8', 16),
             ],
         ]
-        idx_instance = self._get_index_instances('CHANNELGUID')[0]
+        [idx_instance] = self._get_index_instances('CHANNELGUID')
         with open(self.filepath, "rb") as fid:
             fid.seek(idx_instance['offset'])
             channel_structure = read_as_dict(fid,

From 353ac4dafb4969d82b38d6702a94040467d7a331 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Wed, 18 Sep 2024 16:23:56 +0200
Subject: [PATCH 14/63] Modify _convert_to_date to use seconds

---
 neo/rawio/nicoletrawio.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index e7bbac07c..e871fa3d7 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -917,9 +917,9 @@ def _convert_to_guid(hex_list,
     dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list]
     return('{' + guid_format.format(*dec_list) + '}')
 
-def _convert_to_date(data_float, origin = '31-12-1899'):
+def _convert_to_date(data_float, origin = '30-12-1899'): #30-12-1899 is day zero of the OLE Automation date format, one day earlier than the origin used before
     return(datetime.strptime(origin, '%d-%m-%Y')
-           + timedelta(days = int(data_float)))
+           + timedelta(seconds = int(data_float*24*60*60)))

From 466a008d288dd7aa4df3eec7313bd0eb0597ff72 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Fri, 25 Oct 2024 11:23:29 +0200
Subject: [PATCH 15/63] Fix Channelinfo by taking the first channelset

---
 neo/rawio/nicoletrawio.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index e871fa3d7..9ca69f84a 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -370,7 +370,7 @@ def _get_channel_info(self):
                 ('device_id', 'uint8', 16),
             ],
         ]
-        [idx_instance] = self._get_index_instances('CHANNELGUID')
+        idx_instance = self._get_index_instances('CHANNELGUID')[0]
         with open(self.filepath, "rb") as fid:
             fid.seek(idx_instance['offset'])
             channel_structure = read_as_dict(fid,

From da605a2b300e3c73a78291f93fc9f9fa8b5329c8 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Thu, 7 Nov 2024 15:10:38 +0100
Subject: [PATCH 16/63] Fix not all events read error

---
 neo/rawio/nicoletrawio.py | 68 +++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 32 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index 9ca69f84a..65327611d 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -552,43 +552,47 @@ def _get_events(self):
                 [('label', 'S2', 32)],
             ]
             n_events += 1
-            fid.seek(8, 1)
-            event = read_as_dict(fid,
-                                 event_structure[0])
-            fid.seek(48, 1)
-            event = event | read_as_dict(fid,
-                                         event_structure[1])
-            fid.seek(16, 1)
-            event = event | read_as_dict(fid,
-                                         event_structure[2])
-            event['date'] = datetime.fromtimestamp(event['date_ole']*self.SEC_PER_DAY + event['date_fraction'] - self.UNIX_TIME_CONVERSION)
-            event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds()
-            event['guid'] = _convert_to_guid(event['guid'])
             try:
-                id_str = self.HC_EVENT[event['guid']]
+                fid.seek(8, 1)
+                event = read_as_dict(fid,
+                                     event_structure[0])
+                fid.seek(48, 1)
+                event = event | read_as_dict(fid,
+                                             event_structure[1])
+                fid.seek(16, 1)
+                event = event | read_as_dict(fid,
+                                             event_structure[2])
+                event['date'] = datetime.fromtimestamp(event['date_ole']*self.SEC_PER_DAY + event['date_fraction'] - self.UNIX_TIME_CONVERSION)
+                event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds()
+                event['guid'] = _convert_to_guid(event['guid'])
+                try:
+                    id_str = self.HC_EVENT[event['guid']]
+                except:
+                    id_str = 'UNKNOWN'
+                if id_str == 'Annotation':
+                    fid.seek(31, 1)
+                    annotation = read_as_list(fid,
+                                              [('annotation', 'S2', event['text_length'])])
+                else:
+                    annotation = ''
+                event['id_str'] = id_str
+                event['annotation'] = annotation
+                event['block_index'] = 0
+                seg_index = 0
+                segment_time_range = [segment['date'] for segment in self.segments_properties]
+                for segment_time in segment_time_range[1:]:
+                    if segment_time < event['date']:
+                        seg_index += 1
+                event['seg_index'] = seg_index
+                events.append(event)
+                event['type'] = '0' if event['duration'] == 0 else '1'
             except:
-                id_str = 'UNKNOWN'
-            if id_str == 'Annotation':
-                fid.seek(31, 1)
-                annotation = read_as_list(fid,
-                                          [('annotation', 'S2', event['text_length'])])
-            else:
-                annotation = ''
-            event['id_str'] = id_str
-            event['annotation'] = annotation
-            event['block_index'] = 0
-            seg_index = 0
-            segment_time_range = [segment['date'] for segment in self.segments_properties]
-            for segment_time in segment_time_range[1:]:
-                if segment_time < event['date']:
-                    seg_index += 1
-            event['seg_index'] = seg_index
-            events.append(event)
-            event['type'] = '0' if event['duration'] == 0 else '1'
+                warnings.warn(f'Not all events could be read; only {n_events - 1} events were read', BytesWarning)
+                break
             offset += int(pkt['len'])
             fid.seek(offset)
             pkt = read_as_dict(fid,
-                               pkt_structure)
+                                   pkt_structure)
             pkt['guid'] = _convert_to_guid(pkt['guid'])
         self.events = events
         pass

From 012cd9b44f3dab07ee43af6eb61f530c171d16e7 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Thu, 7 Nov 2024 15:39:10 +0100
Subject: [PATCH 17/63] Add loop to fill all unread patient props as None

---
 neo/rawio/nicoletrawio.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index 65327611d..1e67881eb 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -321,6 +321,10 @@ def _get_patient_guid(self):
                 value = ''.join([read_as_list(fid,
                                               [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip()
                 patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value
+
+        for prop in self.INFO_PROPS:
+            if prop not in patient_info.keys():
+                patient_info[prop] = None
         self.patient_info = patient_info
 
     def _get_signal_properties(self):

From 5633167cb1f1295738b45697c68936791d548888 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Tue, 11 Feb 2025 09:08:07 +0100
Subject: [PATCH 18/63] Remove name main from nicoletio

---
 neo/io/nicoletio.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py
index c4d29c558..6bbb3c308 100644
--- a/neo/io/nicoletio.py
+++ b/neo/io/nicoletio.py
@@ -28,12 +28,3 @@ class NicoletIO(NicoletRawIO, BaseFromRaw):
     def __init__(self, filepath=""):
         NicoletRawIO.__init__(self, filepath=filepath)
         BaseFromRaw.__init__(self, filepath)
-
-
-if __name__ == '__main__':
-
-    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e')
-    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
-    #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
-    file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e')
-    segment = file.read_segment()

From 0bd4b99f89b9a89087ea3a13407c7ae2e5ed6753 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Tue, 11 Feb 2025 09:10:19 +0100
Subject: [PATCH 19/63] Revert "Merge branch 'master' into no_events_found_879"

This reverts commit d8584b0001985f08ff1dc3db21b5c9fd372ea181, reversing
changes made to ac6aeda64635a2aae50a20b990cbab667d74734f.
---
 neo/io/nicoletio.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py
index 6bbb3c308..c4d29c558 100644
--- a/neo/io/nicoletio.py
+++ b/neo/io/nicoletio.py
@@ -28,3 +28,12 @@ class NicoletIO(NicoletRawIO, BaseFromRaw):
     def __init__(self, filepath=""):
         NicoletRawIO.__init__(self, filepath=filepath)
         BaseFromRaw.__init__(self, filepath)
+
+
+if __name__ == '__main__':
+
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e')
+    #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e')
+    file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e')
+    segment = file.read_segment()

From 691796a74efb7470592f94a83295a4d36e8539e1 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Thu, 7 Nov 2024 16:13:07 +0100
Subject: [PATCH 20/63] Add try except to catch non-existing instances

---
 neo/rawio/nicoletrawio.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index 1e67881eb..e6cb909aa 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -884,9 +884,18 @@ def _get_index_instances(self, id_str = '', tag = ''):
         if tag:
             identifier = 'tag'
             id_str = tag
-        info_idx = [entry[identifier] for entry in self.tags].index(id_str)
-        matching_idx = [entry['section_idx'] == info_idx for entry in self.main_index]
-        idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match]
+        try:
+            info_idx = [entry[identifier] for entry in self.tags].index(id_str)
+            matching_idx = [entry['section_idx'] == info_idx for entry in self.main_index]
+            idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match]
+        except:
+            warnings.warn(f'No entries for instance "{id_str}" found', BytesWarning)
+            idx_instance = {
+                'section_idx': 0,
+                'offset' : 0,
+                'block_l': 0,
+                'section_l': 0
+            }
         return(idx_instance)
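As a standalone illustration of the lookup-with-fallback pattern from PATCH 20 above (a minimal sketch with illustrative names, not code from the series): it narrows the bare except to the ValueError that list.index actually raises, and it returns the zeroed placeholder inside a list so the [instance] = ... unpacking used by the callers keeps working. PATCH 20 itself still returns a bare dict in the except branch; that shape mismatch is what PATCH 25 later corrects.

    import warnings

    def find_instances(tags, main_index, id_str):
        # Map a tag string to its section index, then collect every
        # main-index entry that points at that section.
        try:
            info_idx = [entry['tag'] for entry in tags].index(id_str)
            return [entry for entry in main_index if entry['section_idx'] == info_idx]
        except ValueError:  # id_str not present in the tag table
            warnings.warn(f'No entries for instance "{id_str}" found')
            # zeroed placeholder keeps [instance] = find_instances(...) from raising
            return [{'section_idx': 0, 'offset': 0, 'block_l': 0, 'section_l': 0}]

    tags = [{'tag': 'SegmentStream'}]
    main_index = [{'section_idx': 0, 'offset': 580, 'block_l': 1, 'section_l': 152}]
    [hit] = find_instances(tags, main_index, 'SegmentStream')          # real entry
    [fallback] = find_instances(tags, main_index, 'EVENTTYPEINFOGUID') # placeholder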
From 8da02e37a7e08326ad0819bb6fc755dd4900df22 Mon Sep 17 00:00:00 2001
From: MureziCapaul
Date: Tue, 11 Feb 2025 09:12:49 +0100
Subject: [PATCH 21/63] Reapply "Merge branch 'master' into no_events_found_879"

This reverts commit 0bd4b99f89b9a89087ea3a13407c7ae2e5ed6753.
--- neo/io/nicoletio.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py index c4d29c558..6bbb3c308 100644 --- a/neo/io/nicoletio.py +++ b/neo/io/nicoletio.py @@ -28,12 +28,3 @@ class NicoletIO(NicoletRawIO, BaseFromRaw): def __init__(self, filepath=""): NicoletRawIO.__init__(self, filepath=filepath) BaseFromRaw.__init__(self, filepath) - - -if __name__ == '__main__': - - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e') - file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e') - segment = file.read_segment() From b1f55671be9d259715342a20cf711f5326e2eaf0 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 09:13:04 +0100 Subject: [PATCH 22/63] Revert "Reapply "Merge branch 'master' into no_events_found_879"" This reverts commit 8da02e37a7e08326ad0819bb6fc755dd4900df22. --- neo/io/nicoletio.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py index 6bbb3c308..c4d29c558 100644 --- a/neo/io/nicoletio.py +++ b/neo/io/nicoletio.py @@ -28,3 +28,12 @@ class NicoletIO(NicoletRawIO, BaseFromRaw): def __init__(self, filepath=""): NicoletRawIO.__init__(self, filepath=filepath) BaseFromRaw.__init__(self, filepath) + + +if __name__ == '__main__': + + #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e') + #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e') + #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e') + file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e') + segment = file.read_segment() From abc5e40a7cc05aace151c9c7ce29d57102703ed8 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 09:24:33 +0100 Subject: [PATCH 23/63] Add helper function ensure that output is always a list --- neo/rawio/nicoletrawio.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index e6cb909aa..f87fcb17c 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -530,7 +530,7 @@ def _get_segment_start_times(self): def _get_events(self): events = [] event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' - event_instances = self._get_index_instances(tag = 'Events') + event_instances = _ensure_list(self._get_index_instances(tag = 'Events')) for instance in event_instances: offset = instance['offset'] with open(self.filepath, "rb") as fid: @@ -968,3 +968,12 @@ def _get_relevant_section(lengths_list, to_compare): except ValueError: segment = len(lengths_list) return(segment) + +def _ensure_list(output): + """ + Ensure the output is a list. If it is a single element, wrap it in a list. + If it is already a list, return it as is. 
+ """ + if not isinstance(output, list): + return [output] + return output \ No newline at end of file From a837b64b4c1553a218ca48749c4eeca9c06c8398 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 09:24:58 +0100 Subject: [PATCH 24/63] Remove name = main from nicoletio --- neo/io/nicoletio.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py index c4d29c558..1629afe32 100644 --- a/neo/io/nicoletio.py +++ b/neo/io/nicoletio.py @@ -27,13 +27,4 @@ class NicoletIO(NicoletRawIO, BaseFromRaw): def __init__(self, filepath=""): NicoletRawIO.__init__(self, filepath=filepath) - BaseFromRaw.__init__(self, filepath) - - -if __name__ == '__main__': - - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\janbrogger.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Research\xxx_PythonShare\nicolet_parser\data\Routine6t1.e') - #file = NicoletRawIO(r'\\fsnph01\NPH_Archiv\LTM\Band0299\58795\9140.e') - file = NicoletIO(r'C:\temp\Patient1_ABLEIT53_t2.e') - segment = file.read_segment() + BaseFromRaw.__init__(self, filepath) \ No newline at end of file From 342bb1e7c8ff1fbbfe0a3486daea169a6d4245a4 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 09:31:58 +0100 Subject: [PATCH 25/63] Make except clause output same format as try output --- neo/rawio/nicoletrawio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index f87fcb17c..0d40ad8db 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -890,12 +890,12 @@ def _get_index_instances(self, id_str = '', tag = ''): idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match] except: warnings.warn(f'No entries for instance "{id_str}" found', BytesWarning) - idx_instance = { + idx_instance = [{ 'section_idx': 0, 'offset' : 0, 'block_l': 0, 'section_l': 0 - } + }] return(idx_instance) def read_as_dict(fid, dtype): From 3a7aa98d78d97aa1e874356f846a16bb83f2257e Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 09:50:57 +0100 Subject: [PATCH 26/63] Add if clause to skip code if no patient info is in file --- neo/rawio/nicoletrawio.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 0d40ad8db..ff25a4594 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -314,13 +314,14 @@ def _get_patient_guid(self): else: value = 0 patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - str_setup = read_as_list(fid, - [('setup', 'uint64', int(patient_info['n_bstr']*2))]) - for i in range(0, int(patient_info['n_bstr']*2), 2): - id_temp = str_setup[i] - value = ''.join([read_as_list(fid, - [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() - patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value + if patient_info['n_bstr'] != 0: + str_setup = read_as_list(fid, + [('setup', 'uint64', int(patient_info['n_bstr']*2))]) + for i in range(0, int(patient_info['n_bstr']*2), 2): + id_temp = str_setup[i] + value = ''.join([read_as_list(fid, + [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() + patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value for prop in self.INFO_PROPS: if prop not in patient_info.keys(): From 95db7c2c28d0d5a5b8410f2494f4648c40a83abc Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 10:28:22 +0100 Subject: [PATCH 27/63] Add if clause to 
return empty list if read size is 0

---
 neo/rawio/nicoletrawio.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py
index ff25a4594..6470d6fc2 100644
--- a/neo/rawio/nicoletrawio.py
+++ b/neo/rawio/nicoletrawio.py
@@ -911,6 +911,8 @@ def read_as_dict(fid, dtype):
 
 def read_as_list(fid, dtype):
     dt = np.dtype(dtype)
+    if dt.itemsize == 0:
+        return []
     h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0]
     h = _process_bytes(h, dt[0])
     return h
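A quick illustration of why the guard in PATCH 27 matters (a sketch, not code from the series; the _process_bytes post-processing is left out, and it assumes NumPy accepts a zero-length subarray field, which the reader's own structures already rely on): when _get_patient_guid reads n_bstr == 0 it builds a field of length 0, the compound dtype then reports itemsize == 0, and the np.frombuffer(...)[0] line would raise on the resulting empty read, so the early return sidesteps it.

    import io
    import numpy as np

    def read_as_list(fid, dtype):
        dt = np.dtype(dtype)
        if dt.itemsize == 0:  # zero-length field, nothing to read
            return []
        # without the guard, the frombuffer/indexing below raises on an empty read
        return np.frombuffer(fid.read(dt.itemsize), dt)[0][0]

    # e.g. the 'setup' field built from n_bstr == 0 in _get_patient_guid
    assert read_as_list(io.BytesIO(b''), [('setup', 'uint64', 0)]) == []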
seg.spiketrains[0] + event_full = seg.events[0] + + t_start, t_stop = 260 * pq.ms, 1.854 * pq.s + seg = r.read_segment(time_slice=(t_start, t_stop)) + shape_slice = seg.analogsignals[0].shape + spikes_slice = seg.spiketrains[0] + event_slice = seg.events[0] + + assert shape_full[0] > shape_slice[0] + + assert spikes_full.size > spikes_slice.size + assert np.all(spikes_slice >= t_start) + assert np.all(spikes_slice <= t_stop) + assert spikes_slice.t_start == t_start + assert spikes_slice.t_stop == t_stop + + assert event_full.size > event_slice.size + assert np.all(event_slice.times >= t_start) + assert np.all(event_slice.times <= t_stop) + + +if __name__ == "__main__": + unittest.main() diff --git a/neo/test/rawiotest/test_nicoletrawio.py b/neo/test/rawiotest/test_nicoletrawio.py new file mode 100644 index 000000000..e10dd704b --- /dev/null +++ b/neo/test/rawiotest/test_nicoletrawio.py @@ -0,0 +1,18 @@ +import unittest + +from neo.rawio.nicoletrawio import NicoletRawIO + +from neo.test.rawiotest.common_rawio_test import BaseTestRawIO + + +class TestExampleRawIO( + BaseTestRawIO, + unittest.TestCase, +): + rawioclass = NicoletRawIO + entities_to_download = ['nicolet'] + + entities_to_test = ["nicolet/e_files/test_nicolet.e"] + +if __name__ == "__main__": + unittest.main() From 4c4aae16776b7219707425de4119cf06861af870 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 13:28:45 +0100 Subject: [PATCH 29/63] Add signal buffer --- neo/rawio/nicoletrawio.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 6470d6fc2..a35ea7b67 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -19,6 +19,7 @@ BaseRawIO, _signal_channel_dtype, _signal_stream_dtype, + _signal_buffer_dtype, _spike_channel_dtype, _event_channel_dtype, ) @@ -160,7 +161,9 @@ def _parse_header(self): self.header = {} self.header["nb_block"] = 1 self.header["nb_segment"] = [len(self.segments_properties)] - self.header["signal_streams"] = np.array([("Signals", "0")], + self.header["signal_buffers"] = np.array([("Signals", "0")], + dtype=_signal_buffer_dtype) + self.header["signal_streams"] = np.array([("Signals", "0", "0")], dtype=_signal_stream_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) self.header["spike_channels"] = np.array([], @@ -702,7 +705,8 @@ def _create_signal_channels(self, dtype): signal['transducer'], timestream['resolution'], timestream['eeg_offset'], - 0)) + 0, + 0,)) return np.array(signal_channels, dtype = dtype) def _create_signal_channels_no_channel_props(self, dtype): @@ -719,6 +723,7 @@ def _create_signal_channels_no_channel_props(self, dtype): signal['transducer'], timestream['resolution'], timestream['eeg_offset'], + 0, 0)) return np.array(signal_channels, dtype = dtype) From 7beccfe35c5ae6e34415bf639a5d7aa6cd724c76 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 16:05:05 +0100 Subject: [PATCH 30/63] Make signal_buffers empty bc no buffers in nic --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index a35ea7b67..04f849e9c 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -161,7 +161,7 @@ def _parse_header(self): self.header = {} self.header["nb_block"] = 1 self.header["nb_segment"] = 
[len(self.segments_properties)] - self.header["signal_buffers"] = np.array([("Signals", "0")], + self.header["signal_buffers"] = np.array([], dtype=_signal_buffer_dtype) self.header["signal_streams"] = np.array([("Signals", "0", "0")], dtype=_signal_stream_dtype) From 9d682666a41302e1a1fb163a17e894d643c3220c Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 16:52:59 +0100 Subject: [PATCH 31/63] Create diff streams to signal with diff sample size --- neo/rawio/nicoletrawio.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 04f849e9c..1976ca33b 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -692,11 +692,16 @@ def _extract_header_information(self): def _create_signal_channels(self, dtype): signal_channels = [] + signal_streams = {} + stream_id = 0 for channel in self.channel_properties: signal = next((item for item in self.signal_properties if item["name"] == channel['sensor']), None) timestream = next((item for item in self.ts_properties if item["label"].split('-')[0] == channel['sensor']), None) if signal is None or timestream is None: continue + if channel['sampling_rate'] not in signal_streams.keys(): + signal_streams[channel['sampling_rate']] = stream_id + stream_id += 1 signal_channels.append(( channel['sensor'], channel['l_input_id'], @@ -705,16 +710,23 @@ def _create_signal_channels(self, dtype): signal['transducer'], timestream['resolution'], timestream['eeg_offset'], - 0, + signal_streams[timestream['sampling_rate']], 0,)) + self.signal_streams = signal_streams + return np.array(signal_channels, dtype = dtype) def _create_signal_channels_no_channel_props(self, dtype): signal_channels = [] + signal_streams = {} + stream_id = 0 for i, timestream in enumerate(self.ts_properties): signal = next((item for item in self.signal_properties if item["name"] == timestream['label'].split('-')[0]), None) if signal is None: continue + if timestream['sampling_rate'] not in signal_streams.keys(): + signal_streams[timestream['sampling_rate']] = stream_id + stream_id += 1 signal_channels.append(( timestream['label'].split('-')[0], i, @@ -723,8 +735,9 @@ def _create_signal_channels_no_channel_props(self, dtype): signal['transducer'], timestream['resolution'], timestream['eeg_offset'], - 0, + signal_streams[timestream['sampling_rate']], 0)) + self.signal_streams = signal_streams return np.array(signal_channels, dtype = dtype) def _generate_additional_annotations(self): From d1703eb50e4e16080d2b885af2099b2e0c49ade8 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 17:33:54 +0100 Subject: [PATCH 32/63] Add support to read in more than 1 signal channel --- neo/rawio/nicoletrawio.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 1976ca33b..9d16e0ad7 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -163,9 +163,9 @@ def _parse_header(self): self.header["nb_segment"] = [len(self.segments_properties)] self.header["signal_buffers"] = np.array([], dtype=_signal_buffer_dtype) - self.header["signal_streams"] = np.array([("Signals", "0", "0")], - dtype=_signal_stream_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) + self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") for 
signal_id in self.signal_streams.values()], + dtype=_signal_stream_dtype) self.header["spike_channels"] = np.array([], dtype= _spike_channel_dtype) self.header["event_channels"] = np.array([("Events", "0", "event"), @@ -774,16 +774,16 @@ def _get_analogsignal_chunk(self, if seg_index >= self.header['nb_segment'][block_index]: raise IndexError(f"Segment Index out of range. There are {self.header['nb_segment'][block_index]} segments for block {block_index}") if channel_indexes is None: - channel_indexes = list(range(0,len(self.ts_properties))) + channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] nb_chan = len(channel_indexes) elif isinstance(channel_indexes, slice): - channel_indexes = np.arange(len(self.ts_properties), dtype="int")[channel_indexes] + channel_indexes = np.arange(self.header['signal_channels'].shape[0], dtype="int")[channel_indexes] nb_chan = len(channel_indexes) else: channel_indexes = np.asarray(channel_indexes) if any(channel_indexes < 0): raise IndexError("Channel Indices cannot be negative") - if any(channel_indexes >= len(self.ts_properties)): + if any(channel_indexes >= len(self.header['signal_channels'].shape[0])): raise IndexError("Channel Indices out of range") nb_chan = len(channel_indexes) if i_start is None: @@ -793,13 +793,14 @@ def _get_analogsignal_chunk(self, if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index)): raise IndexError("Start or Stop Index out of bounds") eeg_sampling_rate = max(self.segments_properties[seg_index]['sampling_rates']) + current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) - data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(eeg_sampling_rate)]) + data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(current_samplingrate)]) for i in channel_indexes: print('Current Channel: ' + str(i)) current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] - if current_samplingrate != eeg_sampling_rate: + if current_samplingrate != eeg_sampling_rate: # Only keeps the channels with the eeg sampling rate, all others get skipped continue [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] From 5e159feebc041de585d2e1c56bc6781688ab2bdd Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 11 Feb 2025 17:56:03 +0100 Subject: [PATCH 33/63] Add full functionality for more than 1 stream input --- neo/rawio/nicoletrawio.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 9d16e0ad7..b2311fc85 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -659,10 +659,10 @@ def _get_montage(self): self.montages = montages self.display = display - def get_nr_samples(self, block_index = 0, seg_index = 0): + def get_nr_samples(self, block_index = 0, seg_index = 0, stream_index = 0): try: duration = self.segments_properties[seg_index]['duration'].total_seconds() - return([int(sampling_rate * duration) for sampling_rate in self.segments_properties[seg_index]['sampling_rates']]) + return([int(sampling_rate * duration) for sampling_rate in 
self.segments_properties[seg_index]['sampling_rates'] if self.signal_streams[sampling_rate] == stream_index]) except IndexError as error: print(str(error) + ': Incorrect segment argument; seg_index must be an integer representing segment index, starting from 0.') pass @@ -789,19 +789,19 @@ def _get_analogsignal_chunk(self, if i_start is None: i_start = 0 if i_stop is None: - i_stop = max(self.get_nr_samples(seg_index)) - if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index)): + i_stop = max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)) + if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)): #Get the maximum number of samples for the respective sampling rate raise IndexError("Start or Stop Index out of bounds") eeg_sampling_rate = max(self.segments_properties[seg_index]['sampling_rates']) current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(current_samplingrate)]) - for i in channel_indexes: + for i, channel_index in enumerate(channel_indexes): print('Current Channel: ' + str(i)) current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] - if current_samplingrate != eeg_sampling_rate: # Only keeps the channels with the eeg sampling rate, all others get skipped - continue + #if current_samplingrate != eeg_sampling_rate: # Only keeps the channels with the eeg sampling rate, all others get skipped + # continue [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] @@ -815,7 +815,7 @@ def _get_analogsignal_chunk(self, first_section = _get_relevant_section(cum_section_lengths, i_start - 1) last_section = _get_relevant_section(cum_section_lengths, i_stop + 1) - 1 if last_section > last_section_for_seg: - raise IndexError(f'Index out of range for channel {i}') + raise IndexError(f'Index out of range for channel {channel_index}') use_sections = all_sections[first_section:last_section] use_sections_length = section_lengths[first_section:last_section] np_idx = 0 @@ -825,7 +825,6 @@ def _get_analogsignal_chunk(self, stop = start + section_length data[np_idx:(np_idx + section_length), i] = multiplicator*self.raw_signal[slice(start, stop)] np_idx += section_length - return data def _segment_t_start(self, block_index: int, seg_index: int): @@ -854,7 +853,8 @@ def _segment_t_stop(self, block_index: int, seg_index: int): def _get_signal_size(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): return max(self.get_nr_samples(block_index = block_index, - seg_index = seg_index)) + seg_index = seg_index, + stream_index = stream_index)) def _get_signal_t_start(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): return self._segment_t_start(block_index, seg_index) From 34362d919445b2d287d6898345bf567a15252ee2 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 14 Feb 2025 12:31:57 +0100 Subject: [PATCH 34/63] Account for UTC+3 timeshift in Nicolet Files --- neo/rawio/nicoletrawio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
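A note on the arithmetic in the hunk below: 2209161600 s is exactly 25569 days, the offset between the OLE date origin (1899-12-30) and the Unix epoch (1970-01-01), and the extra 10800 s is a hard-coded UTC+3 shift that patch 38 later replaces with a timezone-independent conversion. A minimal standalone sketch of that final conversion, with a made-up example value:

    from datetime import datetime, timezone

    OLE_TO_UNIX_DAYS = 25569  # days from 1899-12-30 (OLE origin) to 1970-01-01; 25569 * 86400 = 2209161600

    def ole_days_to_datetime(date_ole, date_fraction=0.0):
        # .e files store dates as fractional days since the OLE origin
        unix_seconds = (date_ole - OLE_TO_UNIX_DAYS) * 86400 + date_fraction
        return datetime.fromtimestamp(unix_seconds, tz=timezone.utc)

    print(ole_days_to_datetime(45000.5))  # -> 2023-03-15 12:00:00+00:00
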
diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 6470d6fc2..f31432ab8 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -45,7 +45,7 @@ class NicoletRawIO(BaseRawIO): TSLABELSIZE = 64 UNITSIZE = 16 ITEMNAMESIZE = 64 - UNIX_TIME_CONVERSION = 2209161600 + UNIX_TIME_CONVERSION = 2209161600+10800 #Dates are saved with an Origin of 1899-12-31 in .e files, but python datetime requires unix time. Also, dates are in UTC+3, so add 3 hours to UNIX (i.e. start 3 hours later) to get UTC SEC_PER_DAY = 86400 TAGS_DICT = { @@ -937,7 +937,7 @@ def _convert_to_guid(hex_list, dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list] return('{' + guid_format.format(*dec_list) + '}') -def _convert_to_date(data_float, origin = '30-12-1899'): #Set Origin to 1 day back to account for something +def _convert_to_date(data_float, origin = '30-12-1899'): #Set Origin to 1 day back to account for 1 day offset return(datetime.strptime(origin, '%d-%m-%Y') + timedelta(seconds = int(data_float*24*60*60))) From b960c23df6143ab14654b2fd5c47354dc76dc09a Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Mon, 17 Feb 2025 10:38:33 +0100 Subject: [PATCH 35/63] Allow event id 'Event Comment' to read annotation (cherry picked from commit a67edca88286f9661520f2c0c65c6cc33123aa98) --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 01d8c4e09..755435489 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -577,7 +577,7 @@ def _get_events(self): id_str = self.HC_EVENT[event['guid']] except: id_str = 'UNKNOWN' - if id_str == 'Annotation': + if id_str == 'Annotation' or id_str == 'Event Comment': fid.seek(31, 1) annotation = read_as_list(fid, [('annotation', 'S2', event['text_length'])]) From 34bfa8dc1ad38e6b8d4beba79015c96f9260099e Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Mon, 17 Feb 2025 13:36:01 +0100 Subject: [PATCH 36/63] Add more events (cherry picked from commit e01e1ca47327a42c4a2e6f5ad58f68fda3eeef2e) --- neo/rawio/nicoletrawio.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 755435489..008d2af8e 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -147,6 +147,28 @@ class NicoletRawIO(BaseRawIO): '{1F3A45A4-4D0F-4CC4-A43A-CAD2BC2D71F2}' : 'ECG', '{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}' : 'Toilet', '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode', + '{08EC3F49-978D-4FE4-AE77-4C421335E5FF}' : 'Prune', + '{0A205CD4-1480-4F02-8AFF-4E4CD3B21078}' : 'Artifact', + '{A5A95609-A7F8-11CF-831A-0800091B5BDA}' : 'Print D', + '{A5A95637-A7F8-11CF-831A-0800091B5BDA}' : 'Tachycardia', + '{A0172995-4A24-401C-AB68-B585474E4C07}' : 'Seizure', + '{FF37D596-5703-43F9-A3F3-FA572C5D958C}' : 'Spike wave', + '{9DF82C59-6520-46E5-940F-16B1282F3DD6}' : 'EEG Check-theta li T', + '{06519E79-3C7B-4535-BA76-2AD76B6C65C8}' : 'Kom.-*', + '{CA4FCAD4-802E-4214-881A-E9C1C6549ABD}' : 'Arousal', + '{A5A95603-A7F8-11CF-831A-0800091B5BDA}' : 'Blink', + '{77A38C02-DCD4-4774-A47D-40437725B278}' : '+Anfallsmuster D-?', + '{32DB96B9-ED12-429A-B98D-27B2A82AD61F}' : 'spike wave', + '{24387A0E-AA04-40B4-82D4-6D58F24D59AB}' : 'Anfallsmuster', + '{A5A95636-A7F8-11CF-831A-0800091B5BDA}' : 'Bradycardia', + '{93A2CB2C-F420-4672-AA62-18989F768519}' : 'Detections Inactive', + '{8C5D49BA-7105-4355-BF6C-B35B9A4E594A}' : 'EEG-Check', + '{5A946B85-2E1D-46B8-9FB2-C0519C9BE681}' : 
'Zaehneputzen', + '{48DA028A-5264-4620-AD03-C8787951E237}' : 'Bewegt', + '{C15CFF61-0326-4276-A08F-0BFC2354E7CC}' : 'Kratzt', + '{F4DD5874-23BA-4FFA-94DD-BE436BB6910F}' : 'Anfall', + '{A5A95610-A7F8-11CF-831A-0800091B5BDA}' : 'Flash', + '{8CB92AA7-A886-4013-8D52-6CD1C71C72B4}' : 'ETP', } def __init__(self, filepath = ""): From 7bfa258fd75715f98a842447b2eb6df78e88c7f1 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Mon, 17 Feb 2025 15:02:32 +0100 Subject: [PATCH 37/63] Change filepath for filename --- neo/io/nicoletio.py | 6 +++--- neo/rawio/nicoletrawio.py | 32 ++++++++++++++++---------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py index 1629afe32..a14944080 100644 --- a/neo/io/nicoletio.py +++ b/neo/io/nicoletio.py @@ -25,6 +25,6 @@ class NicoletIO(NicoletRawIO, BaseFromRaw): # 'group-by-same-units' : one 2D AnalogSignal for each group of channel with same units _prefered_signal_group_mode = "group-by-same-units" - def __init__(self, filepath=""): - NicoletRawIO.__init__(self, filepath=filepath) - BaseFromRaw.__init__(self, filepath) \ No newline at end of file + def __init__(self, filename=""): + NicoletRawIO.__init__(self, filename=filename) + BaseFromRaw.__init__(self, filename) \ No newline at end of file diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 008d2af8e..e002fc702 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -31,7 +31,7 @@ class NicoletRawIO(BaseRawIO): Parameters ---------- - filepath: str | Path + filename: str | Path The path to the .e file. Will be transformed into a WindowsPath object Notes @@ -171,12 +171,12 @@ class NicoletRawIO(BaseRawIO): '{8CB92AA7-A886-4013-8D52-6CD1C71C72B4}' : 'ETP', } - def __init__(self, filepath = ""): + def __init__(self, filename = ""): BaseRawIO.__init__(self) - self.filepath = Path(filepath) + self.filename = Path(filename) def _source_name(self): - return self.filepath + return self.filename def _parse_header(self): self._extract_header_information() @@ -202,7 +202,7 @@ def _get_tags(self): ('index', 'uint32'), ] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(172) n_tags = read_as_list(fid, [('n_tags', 'uint32')]) @@ -225,7 +225,7 @@ def _get_qi(self): ('l_qi', 'uint64'), ('first_idx', 'uint64', self.n_tags), ] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(172208) qi = read_as_dict(fid, qi_structure) @@ -235,7 +235,7 @@ def _get_main_index(self): main_index = [] current_index = 0 next_index_pointer = self.qi['index_idx'] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: while current_index < self.qi['n_entries']: fid.seek(next_index_pointer) nr_index = read_as_list(fid, @@ -268,7 +268,7 @@ def _read_dynamic_packets(self): [dynamic_packets_instace] = self._get_index_instances(id_str = 'InfoChangeStream') offset = dynamic_packets_instace['offset'] self.n_dynamic_packets = int(dynamic_packets_instace['section_l']/48) - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(offset) for i in range(self.n_dynamic_packets): guid_offset = offset + (i+1)*48 @@ -288,7 +288,7 @@ def _read_dynamic_packets(self): self.dynamic_packets = dynamic_packets def _get_dynamic_packets_data(self): - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: for i in range(self.n_dynamic_packets): data = [] dynamic_packet_instances = self._get_index_instances(tag = 
self.dynamic_packets[i]['guid_as_str']) @@ -321,7 +321,7 @@ def _get_patient_guid(self): ('n_values', 'uint64'), ('n_bstr', 'uint64'), ] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(idx_instance['offset']) patient_info = read_as_dict(fid, patient_info_structure @@ -370,7 +370,7 @@ def _get_signal_properties(self): ] idx_instances = self._get_index_instances('SIGNALINFOGUID') for instance in idx_instances: - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(instance['offset']) signal_structure = read_as_dict(fid, signal_structure_segment) @@ -401,7 +401,7 @@ def _get_channel_info(self): ], ] idx_instance = self._get_index_instances('CHANNELGUID')[0] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(idx_instance['offset']) channel_structure = read_as_dict(fid, channel_structure_structure[0]) @@ -529,7 +529,7 @@ def _get_segment_start_times(self): segments_properties = [] [segment_instance] = self._get_index_instances('SegmentStream') n_segments = int(segment_instance['section_l']/152) - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: fid.seek(segment_instance['offset'], 0) for i in range(n_segments): segment_info = {} @@ -559,7 +559,7 @@ def _get_events(self): event_instances = _ensure_list(self._get_index_instances(tag = 'Events')) for instance in event_instances: offset = instance['offset'] - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: pkt_structure = [ ('guid', 'uint8', 16), ('len', 'uint64'), @@ -632,7 +632,7 @@ def _get_events(self): def _get_montage(self): montages = [] montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') - with open(self.filepath, "rb") as fid: + with open(self.filename, "rb") as fid: montage_info_structure = [ [('name', 'S2', 32), ], @@ -693,7 +693,7 @@ def _get_raw_signal(self): earliest_signal_index = [tag['tag'] for tag in self.tags].index('0') offset = [index['offset'] for index in self.main_index if index['section_idx'] == earliest_signal_index][0] - raw_signal = np.memmap(self.filepath, dtype="i2", offset = offset, mode="r") + raw_signal = np.memmap(self.filename, dtype="i2", offset = offset, mode="r") self.signal_data_offset = offset self.raw_signal = raw_signal From 94fa06286cc7bc432ea7cb39b224b68d4f12edc0 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 20 Feb 2025 16:30:35 +0100 Subject: [PATCH 38/63] Add timezone-independent readin of dates --- neo/rawio/nicoletrawio.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index e002fc702..789a31fdc 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -12,7 +12,7 @@ import numpy as np import pandas as pd import warnings -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from pathlib import Path from neo.rawio.baserawio import ( @@ -46,9 +46,6 @@ class NicoletRawIO(BaseRawIO): TSLABELSIZE = 64 UNITSIZE = 16 ITEMNAMESIZE = 64 - UNIX_TIME_CONVERSION = 2209161600+10800 #Dates are saved with an Origin of 1899-12-31 in .e files, but python datetime requires unix time. Also, dates are in UTC+3, so add 3 hours to UNIX (i.e. 
start 3 hours later) to get UTC - SEC_PER_DAY = 86400 - TAGS_DICT = { 'ExtraDataTags' : 'ExtraDataTags', 'SegmentStream' : 'SegmentStream', @@ -543,7 +540,7 @@ def _get_segment_start_times(self): segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] segment_info['sampling_rates'] = [info['sampling_rate'] for info in self.ts_properties] segment_info['scale'] = [info['resolution'] for info in self.ts_properties] - date_str = datetime.fromtimestamp(segment_info['date_ole']*self.SEC_PER_DAY - self.UNIX_TIME_CONVERSION) + date_str = self._convert_ole_to_datetime(segment_info['date_ole']) start_date = date_str.date() start_time = date_str.time() segment_info['date'] = date_str @@ -592,7 +589,7 @@ def _get_events(self): fid.seek(16, 1) event = event | read_as_dict(fid, event_structure[2]) - event['date'] = datetime.fromtimestamp(event['date_ole']*self.SEC_PER_DAY + event['date_fraction'] - self.UNIX_TIME_CONVERSION) + event['date'] = self._convert_ole_to_datetime(event['date_ole'], event['date_fraction']) event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds() event['guid'] = _convert_to_guid(event['guid']) try: @@ -627,7 +624,10 @@ def _get_events(self): self.events = events pass - + def _convert_ole_to_datetime(self, date_ole, date_fraction = 0): + '''Date is saved as OLE with the timezone offset integrated in the file. Transform this to datetime object and add the date_fraction if provided''' + return datetime.fromtimestamp((date_ole - 25569) * 24 * 3600 + date_fraction, + tz = timezone.utc) def _get_montage(self): montages = [] @@ -978,7 +978,7 @@ def _convert_to_guid(hex_list, dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list] return('{' + guid_format.format(*dec_list) + '}') -def _convert_to_date(data_float, origin = '30-12-1899'): #Set Origin to 1 day back to account for 1 day offset +def _convert_to_date(data_float, origin = '30-12-1899'): #Set Origin to 1 day back to account for OLE considering 1900 as a leap year return(datetime.strptime(origin, '%d-%m-%Y') + timedelta(seconds = int(data_float*24*60*60))) From c013d7355a0220e2714c77592f3bdc0300495f55 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 20 Feb 2025 17:02:47 +0100 Subject: [PATCH 39/63] Add proper test file --- neo/test/iotest/test_nicoletio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/iotest/test_nicoletio.py b/neo/test/iotest/test_nicoletio.py index 50b9cf151..388fb36d4 100644 --- a/neo/test/iotest/test_nicoletio.py +++ b/neo/test/iotest/test_nicoletio.py @@ -22,7 +22,7 @@ class TestExampleIO( ioclass = NicoletIO entities_to_download = ["nicolet"] entities_to_test = [ - "nicolet/e_files/test_nicolet.e", + "nicolet/e_files/test.e", ] def setUp(self): From fc018d076fe2a77779b2d7a428e71c565ce90cbd Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 20 Feb 2025 17:04:43 +0100 Subject: [PATCH 40/63] Remove windows path --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 789a31fdc..b4541dae7 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -170,7 +170,7 @@ class NicoletRawIO(BaseRawIO): def __init__(self, filename = ""): BaseRawIO.__init__(self) - self.filename = Path(filename) + self.filename = filename def _source_name(self): return self.filename From 6ad2f493ee00706de2bcec0d4b20a791f9b4b24a Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 20 Feb 2025 17:15:53 +0100 Subject: [PATCH 
41/63] Add proper file to test_nicoletrawio --- neo/test/rawiotest/test_nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/rawiotest/test_nicoletrawio.py b/neo/test/rawiotest/test_nicoletrawio.py index e10dd704b..81f6a003c 100644 --- a/neo/test/rawiotest/test_nicoletrawio.py +++ b/neo/test/rawiotest/test_nicoletrawio.py @@ -12,7 +12,7 @@ class TestExampleRawIO( rawioclass = NicoletRawIO entities_to_download = ['nicolet'] - entities_to_test = ["nicolet/e_files/test_nicolet.e"] + entities_to_test = ["nicolet/e_files/test.e"] if __name__ == "__main__": unittest.main() From 326cf7827d014b6427acf74ccfb9d3442f7032a8 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 3 Apr 2025 18:16:34 +0200 Subject: [PATCH 42/63] Add function to read buffer information of raw signal --- neo/rawio/nicoletrawio.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index b4541dae7..fb149b460 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -192,6 +192,7 @@ def _parse_header(self): dtype = _event_channel_dtype) self._generate_minimal_annotations() self._generate_additional_annotations() + self._get_buffer_descriptions() def _get_tags(self): tags_structure = [ @@ -697,6 +698,28 @@ def _get_raw_signal(self): self.signal_data_offset = offset self.raw_signal = raw_signal + def _get_buffer_descriptions(self): + ''' + Get the descriptions of raw signal buffers + + TODO: File offset + TODO: Support for multiple signal streams + ''' + buffer_id = 0 + self._buffer_descriptions = {0: {}} + for seg_index, segment in enumerate(self.segments_properties): + self._buffer_descriptions[0][seg_index] = {} + shape = (max(self.get_nr_samples(seg_index = seg_index)), + segment['sampling_rates'].count(segment['sampling_rates'][0])) + self._buffer_descriptions[0][seg_index][buffer_id] = { + "type": "raw", + "file_path": str(self.filename), + "dtype": 'i2', + "order": "C", + #"file_offset": file_offset, + "shape": shape, + } + def _extract_header_information(self): self._get_tags() self._get_qi() @@ -939,6 +962,9 @@ def _get_index_instances(self, id_str = '', tag = ''): 'section_l': 0 }] return(idx_instance) + + def _get_analogsignal_buffer_description(self, block_index, seg_index, buffer_id): + return self._buffer_descriptions[block_index][seg_index][buffer_id] def read_as_dict(fid, dtype): info = dict() From 32cdbbabbd76d11de7f773ee66cdf042774f53b6 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Mon, 7 Apr 2025 13:45:49 +0200 Subject: [PATCH 43/63] Add readin of consecutive segments --- neo/rawio/nicoletrawio.py | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index b4541dae7..f5779e5de 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -797,33 +797,27 @@ def _get_analogsignal_chunk(self, raise IndexError(f"Segment Index out of range. 
There are {self.header['nb_segment'][block_index]} segments for block {block_index}") if channel_indexes is None: channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] - nb_chan = len(channel_indexes) elif isinstance(channel_indexes, slice): channel_indexes = np.arange(self.header['signal_channels'].shape[0], dtype="int")[channel_indexes] - nb_chan = len(channel_indexes) else: channel_indexes = np.asarray(channel_indexes) if any(channel_indexes < 0): raise IndexError("Channel Indices cannot be negative") if any(channel_indexes >= len(self.header['signal_channels'].shape[0])): raise IndexError("Channel Indices out of range") - nb_chan = len(channel_indexes) if i_start is None: i_start = 0 if i_stop is None: i_stop = max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)) if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)): #Get the maximum number of samples for the respective sampling rate raise IndexError("Start or Stop Index out of bounds") - eeg_sampling_rate = max(self.segments_properties[seg_index]['sampling_rates']) - current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] + current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] #Non signal-stream specific, just take the sampling rate of the first channel cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) - data = np.empty([i_stop - i_start, self.segments_properties[0]['sampling_rates'].count(current_samplingrate)]) + data = np.empty([i_stop - i_start, self.segments_properties[seg_index]['sampling_rates'].count(current_samplingrate)]) for i, channel_index in enumerate(channel_indexes): print('Current Channel: ' + str(i)) current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] - #if current_samplingrate != eeg_sampling_rate: # Only keeps the channels with the eeg sampling rate, all others get skipped - # continue [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] @@ -833,13 +827,8 @@ def _get_analogsignal_chunk(self, last_section_for_seg = _get_relevant_section(cum_section_lengths, current_samplingrate* self.segments_properties[seg_index]['duration'].total_seconds()) - 1 + first_section_for_seg - offset_section_lengths = [length - cum_section_lengths[first_section_for_seg] for length in cum_section_lengths] - first_section = _get_relevant_section(cum_section_lengths, i_start - 1) - last_section = _get_relevant_section(cum_section_lengths, i_stop + 1) - 1 - if last_section > last_section_for_seg: - raise IndexError(f'Index out of range for channel {channel_index}') - use_sections = all_sections[first_section:last_section] - use_sections_length = section_lengths[first_section:last_section] + use_sections = all_sections[first_section_for_seg:last_section_for_seg] + use_sections_length = section_lengths[first_section_for_seg:last_section_for_seg] np_idx = 0 for section_idx, section_length in zip(use_sections, use_sections_length): cur_sec = self.main_index[section_idx] From 85b0d295f07b5ecfeea41c9995164ee216817d12 Mon Sep 17 00:00:00 2001 
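The patch below fills in the file_offset that _get_buffer_descriptions() introduced as a TODO. For context, a hypothetical illustration of how a buffer description of this shape maps onto numpy's memmap; the keys follow the convention used by the reader, while every value in the dict is a placeholder:

    import numpy as np

    buffer_desc = {
        "type": "raw",
        "file_path": "recording.e",  # placeholder path
        "dtype": "i2",               # int16 samples, as used for the raw signal
        "order": "C",
        "file_offset": 5736,         # byte offset of the segment's first data section
        "shape": (250_000, 32),      # (n_samples, n_channels)
    }

    raw = np.memmap(
        buffer_desc["file_path"],
        dtype=buffer_desc["dtype"],
        mode="r",
        offset=buffer_desc["file_offset"],
        shape=buffer_desc["shape"],
        order=buffer_desc["order"],
    )

Slicing such a map reads only the touched pages from disk, which is what keeps lazy loading cheap.
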
From: MureziCapaul Date: Fri, 2 May 2025 16:26:55 +0200 Subject: [PATCH 44/63] Add buffer description for EEG signal stream --- neo/rawio/nicoletrawio.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index b4a016e4c..57fdef400 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -708,15 +708,22 @@ def _get_buffer_descriptions(self): buffer_id = 0 self._buffer_descriptions = {0: {}} for seg_index, segment in enumerate(self.segments_properties): - self._buffer_descriptions[0][seg_index] = {} + current_samplingrate = segment['sampling_rates'][0] #Non signal-stream specific, just take the sampling rate of the first channel + skip_values = ([0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])))[seg_index] * current_samplingrate + [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == '0'] + all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] + section_lengths = [0] + list(np.cumsum([int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections])) + first_section_for_seg = _get_relevant_section(section_lengths, skip_values) - 1 + offset = self.main_index[all_sections[first_section_for_seg]]['offset'] shape = (max(self.get_nr_samples(seg_index = seg_index)), segment['sampling_rates'].count(segment['sampling_rates'][0])) + self._buffer_descriptions[0][seg_index] = {} self._buffer_descriptions[0][seg_index][buffer_id] = { "type": "raw", "file_path": str(self.filename), "dtype": 'i2', "order": "C", - #"file_offset": file_offset, + "file_offset": offset, "shape": shape, } From 164fc66635297b6f9b8d9b755f214f04fcf2c077 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 1 Jul 2025 16:42:58 +0200 Subject: [PATCH 45/63] Add io and rawio to init --- neo/io/__init__.py | 7 +++++++ neo/rawio/__init__.py | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/neo/io/__init__.py b/neo/io/__init__.py index 121b4c010..fbd094718 100644 --- a/neo/io/__init__.py +++ b/neo/io/__init__.py @@ -48,6 +48,7 @@ * :attr:`NeuroNexusIO` * :attr:`NeuroScopeIO` * :attr:`NeuroshareIO` +* :attr:`NicoletIO` * :attr:`NixIO` * :attr:`NWBIO` * :attr:`OpenEphysIO` @@ -204,6 +205,10 @@ .. autoattribute:: extensions +.. autoclass:: neo.io.NicoletIO + + .. autoattribute:: extensions + .. autoclass:: neo.io.NixIO .. autoattribute:: extensions @@ -326,6 +331,7 @@ from neo.io.neuroexplorerio import NeuroExplorerIO from neo.io.neuronexusio import NeuroNexusIO from neo.io.neuroscopeio import NeuroScopeIO +from neo.io.nicoletio import NicoletIO from neo.io.nixio import NixIO from neo.io.nixio_fr import NixIO as NixIOFr from neo.io.nwbio import NWBIO @@ -384,6 +390,7 @@ NeuroNexusIO, NeuroScopeIO, NeuroshareIO, + NicoletIO, NWBIO, OpenEphysIO, OpenEphysBinaryIO, diff --git a/neo/rawio/__init__.py b/neo/rawio/__init__.py index f2c464d18..b69ac4173 100644 --- a/neo/rawio/__init__.py +++ b/neo/rawio/__init__.py @@ -31,6 +31,7 @@ * :attr:`NeuroExplorerRawIO` * :attr:`NeuroNexusRawIO` * :attr:`NeuroScopeRawIO` +* :attr:`NicoletRawIO` * :attr:`NIXRawIO` * :attr:`OpenEphysRawIO` * :attr:`OpenEphysBinaryRawIO` @@ -123,6 +124,10 @@ .. autoattribute:: extensions +.. autoclass:: neo.rawio.NicoletRawIO + + .. autoattribute:: extensions + .. autoclass:: neo.rawio.NIXRawIO .. 
autoattribute:: extensions @@ -204,6 +209,7 @@ from neo.rawio.neuroexplorerrawio import NeuroExplorerRawIO from neo.rawio.neuronexusrawio import NeuroNexusRawIO from neo.rawio.neuroscoperawio import NeuroScopeRawIO +from neo.rawio.nicoletrawio import NicoletRawIO from neo.rawio.nixrawio import NIXRawIO from neo.rawio.openephysrawio import OpenEphysRawIO from neo.rawio.openephysbinaryrawio import OpenEphysBinaryRawIO @@ -239,6 +245,7 @@ NeuroExplorerRawIO, NeuroNexusRawIO, NeuroScopeRawIO, + NicoletRawIO, NIXRawIO, OpenEphysRawIO, OpenEphysBinaryRawIO, From b6d750e100d424f9f0302824c6f740741ea86e7f Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:12:35 +0200 Subject: [PATCH 46/63] Change name of test to NicoletRawIO test --- neo/test/rawiotest/test_nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/rawiotest/test_nicoletrawio.py b/neo/test/rawiotest/test_nicoletrawio.py index 81f6a003c..77309d551 100644 --- a/neo/test/rawiotest/test_nicoletrawio.py +++ b/neo/test/rawiotest/test_nicoletrawio.py @@ -5,7 +5,7 @@ from neo.test.rawiotest.common_rawio_test import BaseTestRawIO -class TestExampleRawIO( +class TestNicoletRawIO( BaseTestRawIO, unittest.TestCase, ): From 206fef5f1eb770bacfe15f9e60b1868e84111e57 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:14:56 +0200 Subject: [PATCH 47/63] Remove nb_events from __array_annotations__ --- neo/rawio/nicoletrawio.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 57fdef400..4f90536a3 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -810,10 +810,7 @@ def _generate_additional_annotations(self): seg_annotations['name'] = self.patient_info['altID'] seg_annotations['date'] = self.segments_properties[i]['date'] seg_annotations['duration'] = self.segments_properties[i]['duration'].total_seconds() - for event_types in seg_annotations['events']: - event_types['__array_annotations__']['nb_events'] = len([event for event in self.events - if event['seg_index'] == i and event['type'] == event_types['id']]) - + def _get_analogsignal_chunk(self, block_index: int = 0, seg_index: int = 0, From 2022cb85c1ccef7430ccbb97eedd33102c6cc41b Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:15:44 +0200 Subject: [PATCH 48/63] Fix usage of len function on integer --- neo/rawio/nicoletrawio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 4f90536a3..d3c09289e 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -830,7 +830,7 @@ def _get_analogsignal_chunk(self, channel_indexes = np.asarray(channel_indexes) if any(channel_indexes < 0): raise IndexError("Channel Indices cannot be negative") - if any(channel_indexes >= len(self.header['signal_channels'].shape[0])): + if any(channel_indexes >= self.header['signal_channels'].shape[0]): raise IndexError("Channel Indices out of range") if i_start is None: i_start = 0 From 87156a52d75f2128419adc26f4d1728adc088a6a Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:17:00 +0200 Subject: [PATCH 49/63] Fix signal readin for start and stop within the section --- neo/rawio/nicoletrawio.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index d3c09289e..2d519911c 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ 
-840,7 +840,7 @@ def _get_analogsignal_chunk(self, raise IndexError("Start or Stop Index out of bounds") current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] #Non signal-stream specific, just take the sampling rate of the first channel cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) - data = np.empty([i_stop - i_start, self.segments_properties[seg_index]['sampling_rates'].count(current_samplingrate)]) + data = np.empty([i_stop - i_start, len(channel_indexes)]) for i, channel_index in enumerate(channel_indexes): print('Current Channel: ' + str(i)) current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] @@ -857,10 +857,15 @@ def _get_analogsignal_chunk(self, use_sections = all_sections[first_section_for_seg:last_section_for_seg] use_sections_length = section_lengths[first_section_for_seg:last_section_for_seg] np_idx = 0 - for section_idx, section_length in zip(use_sections, use_sections_length): + for j, (section_idx, section_length) in enumerate(zip(use_sections, use_sections_length)): cur_sec = self.main_index[section_idx] - start = int((cur_sec['offset'] - self.signal_data_offset)/2) - stop = start + section_length + start = int((cur_sec['offset'] - self.signal_data_offset)/2) + if (i_start > start): + start = i_start + if (i_stop - i_start) < (section_length*(j+1)): + stop = start + (i_stop - i_start - section_length*j) + else: + stop = start + section_length data[np_idx:(np_idx + section_length), i] = multiplicator*self.raw_signal[slice(start, stop)] np_idx += section_length return data From 8da32d9bd0c7343c227a45ffc0a8243ba8b7a373 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:17:32 +0200 Subject: [PATCH 50/63] Fix event counts --- neo/rawio/nicoletrawio.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 2d519911c..ccf26a705 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -914,8 +914,9 @@ def _rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype return None def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0): - return self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][event_channel_index]['__array_annotations__']['nb_events'] - + return len([event for event in self.events if (event['block_index'] == block_index + and event['seg_index'] == seg_index + and event['type'] == str(event_channel_index))]) def _get_event_timestamps( self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0, t_start: float = None, t_stop: float = None From 54e80cee2fada569a573b9d93c603d9129f06865 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:18:24 +0200 Subject: [PATCH 51/63] Remove print of channel name --- neo/rawio/nicoletrawio.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index ccf26a705..6a12942cf 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -842,7 +842,6 @@ def _get_analogsignal_chunk(self, cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) data = np.empty([i_stop - i_start, len(channel_indexes)]) for i, channel_index in enumerate(channel_indexes): - print('Current Channel: ' + str(i)) current_samplingrate = 
self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] From 74caebae858b63c503e209e9b7b9fc4dc50fa866 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:19:16 +0200 Subject: [PATCH 52/63] Add handling of slice(None) --- neo/rawio/nicoletrawio.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 6a12942cf..597095c84 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -825,6 +825,8 @@ def _get_analogsignal_chunk(self, if channel_indexes is None: channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] elif isinstance(channel_indexes, slice): + if channel_indexes == slice(None): + channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] channel_indexes = np.arange(self.header['signal_channels'].shape[0], dtype="int")[channel_indexes] else: channel_indexes = np.asarray(channel_indexes) From bc60f94cc1e4ead5b5bffa53c77d1a84e5a8791d Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:25:18 +0200 Subject: [PATCH 53/63] Add buffer with id 0 --- neo/rawio/nicoletrawio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 597095c84..7b1d764c8 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -180,7 +180,7 @@ def _parse_header(self): self.header = {} self.header["nb_block"] = 1 self.header["nb_segment"] = [len(self.segments_properties)] - self.header["signal_buffers"] = np.array([], + self.header["signal_buffers"] = np.array(['Signals', '0'], dtype=_signal_buffer_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") for signal_id in self.signal_streams.values()], @@ -788,7 +788,7 @@ def _create_signal_channels_no_channel_props(self, dtype): timestream['resolution'], timestream['eeg_offset'], signal_streams[timestream['sampling_rate']], - 0)) + '0')) self.signal_streams = signal_streams return np.array(signal_channels, dtype = dtype) From eec690856ccd84992ae3471e6638760f89f58639 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:26:34 +0200 Subject: [PATCH 54/63] Always use create_signal_channels based on TS --- neo/rawio/nicoletrawio.py | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 7b1d764c8..5be97e3f1 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -182,7 +182,7 @@ def _parse_header(self): self.header["nb_segment"] = [len(self.segments_properties)] self.header["signal_buffers"] = np.array(['Signals', '0'], dtype=_signal_buffer_dtype) - self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) if self.channel_properties else self._create_signal_channels_no_channel_props(_signal_channel_dtype) + self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") for signal_id in self.signal_streams.values()], 
dtype=_signal_stream_dtype) self.header["spike_channels"] = np.array([], @@ -743,32 +743,6 @@ def _extract_header_information(self): self._get_raw_signal() def _create_signal_channels(self, dtype): - signal_channels = [] - signal_streams = {} - stream_id = 0 - for channel in self.channel_properties: - signal = next((item for item in self.signal_properties if item["name"] == channel['sensor']), None) - timestream = next((item for item in self.ts_properties if item["label"].split('-')[0] == channel['sensor']), None) - if signal is None or timestream is None: - continue - if channel['sampling_rate'] not in signal_streams.keys(): - signal_streams[channel['sampling_rate']] = stream_id - stream_id += 1 - signal_channels.append(( - channel['sensor'], - channel['l_input_id'], - channel['sampling_rate'], - 'int16', - signal['transducer'], - timestream['resolution'], - timestream['eeg_offset'], - signal_streams[timestream['sampling_rate']], - 0,)) - self.signal_streams = signal_streams - - return np.array(signal_channels, dtype = dtype) - - def _create_signal_channels_no_channel_props(self, dtype): signal_channels = [] signal_streams = {} stream_id = 0 From 1b0e3edc2497b323b0ab3b2369a40e31952e8cf9 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 14:30:01 +0200 Subject: [PATCH 55/63] Change Name to TestNicoletIO --- neo/test/iotest/test_nicoletio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neo/test/iotest/test_nicoletio.py b/neo/test/iotest/test_nicoletio.py index 388fb36d4..625e2933d 100644 --- a/neo/test/iotest/test_nicoletio.py +++ b/neo/test/iotest/test_nicoletio.py @@ -15,7 +15,7 @@ import numpy as np -class TestExampleIO( +class TestNicoletIO( BaseTestIO, unittest.TestCase, ): From 63a9d347c83b8b7904a91621d5629bfe570142e9 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 15:51:17 +0200 Subject: [PATCH 56/63] Add test for every entitiy --- neo/test/iotest/test_nicoletio.py | 99 +++++++++++++------------------ 1 file changed, 40 insertions(+), 59 deletions(-) diff --git a/neo/test/iotest/test_nicoletio.py b/neo/test/iotest/test_nicoletio.py index 625e2933d..4f24e2cc3 100644 --- a/neo/test/iotest/test_nicoletio.py +++ b/neo/test/iotest/test_nicoletio.py @@ -35,71 +35,52 @@ def tearDown(self) -> None: super().tearDown() for entity in self.entities_to_test: full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir) - pathlib.Path(full_path).unlink(missing_ok=True) + #pathlib.Path(full_path).unlink(missing_ok=True) -class TestNicoletIO(unittest.TestCase): def test_read_segment_lazy(self): - r = NicoletIO(filename=None) - seg = r.read_segment(lazy=True) - for ana in seg.analogsignals: - assert isinstance(ana, AnalogSignalProxy) - ana = ana.load() - assert isinstance(ana, AnalogSignal) - for st in seg.spiketrains: - assert isinstance(st, SpikeTrainProxy) - st = st.load() - assert isinstance(st, SpikeTrain) - - seg = r.read_segment(lazy=False) - for anasig in seg.analogsignals: - assert isinstance(ana, AnalogSignal) - self.assertNotEqual(anasig.size, 0) - for st in seg.spiketrains: - assert isinstance(st, SpikeTrain) - self.assertNotEqual(st.size, 0) - - # annotations - assert "seg_extra_info" in seg.annotations - assert seg.name == "Seg #0 Block #0" - for anasig in seg.analogsignals: - assert anasig.name is not None - for st in seg.spiketrains: - assert st.name is not None - for ev in seg.events: - assert ev.name is not None - for ep in seg.epochs: - assert ep.name is not None + for entity in 
self.entities_to_test: + r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)) + seg = r.read_segment(lazy=True) + for ana in seg.analogsignals: + assert isinstance(ana, AnalogSignalProxy) + ana = ana.load() + assert isinstance(ana, AnalogSignal) + + seg = r.read_segment(lazy=False) + for anasig in seg.analogsignals: + assert isinstance(ana, AnalogSignal) + self.assertNotEqual(anasig.size, 0) + + # annotations + for anasig in seg.analogsignals: + assert anasig.name is not None + for ev in seg.events: + assert ev.name is not None + for ep in seg.epochs: + assert ep.name is not None def test_read_block(self): - r = NicoletIO(filename=None) - bl = r.read_block(lazy=True) - # assert len(bl.list_units) == 3 - # assert len(bl.channel_indexes) == 1 + 1 # signals grouped + units grouped + for entity in self.entities_to_test: + r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)) + bl = r.read_block(lazy=True) def test_read_segment_with_time_slice(self): - r = NicoletIO(filename=None) - seg = r.read_segment(time_slice=None) - shape_full = seg.analogsignals[0].shape - spikes_full = seg.spiketrains[0] - event_full = seg.events[0] - - t_start, t_stop = 260 * pq.ms, 1.854 * pq.s - seg = r.read_segment(time_slice=(t_start, t_stop)) - shape_slice = seg.analogsignals[0].shape - spikes_slice = seg.spiketrains[0] - event_slice = seg.events[0] - - assert shape_full[0] > shape_slice[0] - - assert spikes_full.size > spikes_slice.size - assert np.all(spikes_slice >= t_start) - assert np.all(spikes_slice <= t_stop) - assert spikes_slice.t_start == t_start - assert spikes_slice.t_stop == t_stop - - assert event_full.size > event_slice.size - assert np.all(event_slice.times >= t_start) - assert np.all(event_slice.times <= t_stop) + for entity in self.entities_to_test: + r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)) + seg = r.read_segment(time_slice=None) + shape_full = seg.analogsignals[0].shape + event_full = seg.events[0] + + t_start, t_stop = 260 * pq.ms, 1.854 * pq.s + seg = r.read_segment(time_slice=(t_start, t_stop)) + shape_slice = seg.analogsignals[0].shape + event_slice = seg.events[0] + + assert shape_full[0] > shape_slice[0] + + assert event_full.size > event_slice.size + assert np.all(event_slice.times >= t_start) + assert np.all(event_slice.times <= t_stop) if __name__ == "__main__": From f079e6482f83e467f22ce5f464e1412cc0715ac5 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Wed, 2 Jul 2025 16:13:44 +0200 Subject: [PATCH 57/63] Remove unused imports --- neo/rawio/nicoletrawio.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 5be97e3f1..03861b0fd 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -10,10 +10,8 @@ from __future__ import annotations import numpy as np -import pandas as pd import warnings from datetime import datetime, timedelta, timezone -from pathlib import Path from neo.rawio.baserawio import ( BaseRawIO, From 72aa644f01c6fe15d6b8a997108e5ea2e03ee0e3 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 4 Jul 2025 08:09:11 +0200 Subject: [PATCH 58/63] Add basic docstring to every function --- neo/rawio/nicoletrawio.py | 133 +++++++++++++++++++++++++++++++++++--- 1 file changed, 125 insertions(+), 8 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 03861b0fd..7f0c5c204 100644 --- 
a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -29,12 +29,8 @@ class NicoletRawIO(BaseRawIO): Parameters ---------- - filename: str | Path - The path to the .e file. Will be transformed into a WindowsPath object - - Notes - ---------- - Currently, only channels that have the same sampling rate as the EEG-Channels will be processed. Other channels will be discarded. + filename: str, default: None + The .e file to be loaded ''' extensions = ["e"] @@ -171,9 +167,15 @@ def __init__(self, filename = ""): self.filename = filename def _source_name(self): + ''' + Returns the path of the input file + ''' return self.filename def _parse_header(self): + ''' + Parses the default header structure and generates additional annotations + ''' self._extract_header_information() self.header = {} self.header["nb_block"] = 1 @@ -181,7 +183,8 @@ self.header["signal_buffers"] = np.array(['Signals', '0'], dtype=_signal_buffer_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) - self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") for signal_id in self.signal_streams.values()], + self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") + for signal_id in self.signal_streams.values()], dtype=_signal_stream_dtype) self.header["spike_channels"] = np.array([], dtype= _spike_channel_dtype) @@ -193,6 +196,9 @@ self._get_buffer_descriptions() def _get_tags(self): + ''' + Get the tags that specify where each type of information is indexed within the main index + ''' tags_structure = [ ('tag', 'S80'), ('index', 'uint32'), @@ -213,6 +219,9 @@ def _get_qi(self): + ''' + Get the QI block, which specifies the number of main index entries and where they are located within the file + ''' qi_structure = [ ('n_entries', 'uint32'), ('misc1', 'uint32'), @@ -228,6 +237,9 @@ def _get_main_index(self): + ''' + Get all main index pointers. They show where each data section is located within the file + ''' main_index = [] current_index = 0 next_index_pointer = self.qi['index_idx']
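The main index that _get_main_index() populates is a chained structure: each index block holds an entry count, that many fixed-size entries, and a pointer to the next block, followed until qi['n_entries'] entries have been collected. A sketch of that walk, where the entry fields mirror the reader's main_index dicts but the field widths are assumptions for illustration, not the normative .e layout:

    import numpy as np

    ENTRY_DTYPE = np.dtype([("section_idx", "<u8"), ("offset", "<u8"),
                            ("block_l", "<u4"), ("section_l", "<u4")])  # assumed widths

    def walk_main_index(fid, first_pointer, n_entries_total):
        entries, next_pointer = [], first_pointer
        while len(entries) < n_entries_total:
            fid.seek(next_pointer)
            nr_index = int(np.frombuffer(fid.read(8), "<u8")[0])  # entries in this block
            block = np.frombuffer(fid.read(ENTRY_DTYPE.itemsize * nr_index), ENTRY_DTYPE)
            entries.extend({name: int(rec[name]) for name in ENTRY_DTYPE.names} for rec in block)
            next_pointer = int(np.frombuffer(fid.read(8), "<u8")[0])  # chain to the next block
        return entries
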
@@ -228,6 +237,9 @@
 
     def _get_main_index(self):
+        '''
+        Get all main index pointers. They show where each section is located within the file
+        '''
         main_index = []
         current_index = 0
         next_index_pointer = self.qi['index_idx']
@@ -253,6 +265,9 @@ def _read_dynamic_packets(self):
         self.all_section_ids = [entry['section_idx'] for entry in main_index]
 
     def _read_dynamic_packets(self):
+        '''
+        Read the packets which specify where the data is located
+        '''
         dynamic_packet_structure = [
             ('guid_list', 'uint8', 16),
             ('date', 'float64'),
@@ -284,6 +299,9 @@
         self.dynamic_packets = dynamic_packets
 
     def _get_dynamic_packets_data(self):
+        '''
+        Read the data within the dynamic packets
+        '''
         with open(self.filename, "rb") as fid:
             for i in range(self.n_dynamic_packets):
                 data = []
@@ -310,6 +328,9 @@
             self.dynamic_packets[i]['data'] = np.array(data)
 
     def _get_patient_guid(self):
+        '''
+        Read patient metadata
+        '''
         [idx_instance] = self._get_index_instances(id_str = 'PATIENTINFOGUID')
         patient_info_structure = [
             ('guid', 'uint8', 16),
@@ -350,6 +371,9 @@
         self.patient_info = patient_info
 
     def _get_signal_properties(self):
+        '''
+        Get the properties for every signal channel
+        '''
         signal_properties_segment = [
             ('name', 'S2', self.LABELSIZE),
             ('transducer', 'S2', self.UNITSIZE),
@@ -387,6 +411,9 @@
         pass
 
     def _get_channel_info(self):
+        '''
+        Get the properties for every channel
+        '''
         channel_properties = []
         channel_structure_structure= [
             [('guid', 'uint8', 16),
@@ -432,6 +459,9 @@
         self.channel_properties = channel_properties
 
     def _get_ts_properties_all(self):
+        '''
+        DEPECRATED
+        '''
         ts_packets_properties = []
         ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID']
         l_ts_packets = len(ts_packets)
@@ -477,6 +507,10 @@
     def _get_ts_properties(self, ts_packet_index = 0):
+        '''
+        Read properties of every timestream.
+ So far, only the first instance of the timestream is used for every segment + ''' ts_properties = [] ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] l_ts_packets = len(ts_packets) @@ -522,6 +556,9 @@ def _get_ts_properties(self, ts_packet_index = 0): self.ts_properties = ts_properties def _get_segment_start_times(self): + ''' + Get the start and stop times and the duration of each segment + ''' segments_properties = [] [segment_instance] = self._get_index_instances('SegmentStream') n_segments = int(segment_instance['section_l']/152) @@ -550,6 +587,9 @@ def _get_segment_start_times(self): self.segments_properties = segments_properties def _get_events(self): + ''' + Read all events + ''' events = [] event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' event_instances = _ensure_list(self._get_index_instances(tag = 'Events')) @@ -629,6 +669,9 @@ def _convert_ole_to_datetime(self, date_ole, date_fraction = 0): tz = timezone.utc) def _get_montage(self): + ''' + Read the montages + ''' montages = [] montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') with open(self.filename, "rb") as fid: @@ -681,6 +724,9 @@ def _get_montage(self): self.display = display def get_nr_samples(self, block_index = 0, seg_index = 0, stream_index = 0): + ''' + Get the number of samples for a given signal stream in a given segment + ''' try: duration = self.segments_properties[seg_index]['duration'].total_seconds() return([int(sampling_rate * duration) for sampling_rate in self.segments_properties[seg_index]['sampling_rates'] if self.signal_streams[sampling_rate] == stream_index]) @@ -689,6 +735,9 @@ def get_nr_samples(self, block_index = 0, seg_index = 0, stream_index = 0): pass def _get_raw_signal(self): + ''' + Create a memmap of the raw signal + ''' earliest_signal_index = [tag['tag'] for tag in self.tags].index('0') offset = [index['offset'] for index in self.main_index if index['section_idx'] == earliest_signal_index][0] @@ -726,6 +775,9 @@ def _get_buffer_descriptions(self): } def _extract_header_information(self): + ''' + Create header information by reading file metadata + ''' self._get_tags() self._get_qi() self._get_main_index() @@ -741,6 +793,9 @@ def _extract_header_information(self): self._get_raw_signal() def _create_signal_channels(self, dtype): + ''' + Create information for signal channels based on timestream and signal_properties + ''' signal_channels = [] signal_streams = {} stream_id = 0 @@ -765,6 +820,9 @@ def _create_signal_channels(self, dtype): return np.array(signal_channels, dtype = dtype) def _generate_additional_annotations(self): + ''' + Add file metadata to all blocks and segments + ''' for block_index in range(self.header['nb_block']): bl_annotations = self.raw_annotations["blocks"][block_index] bl_annotations['date'] = self.segments_properties[0]['date'] @@ -790,6 +848,9 @@ def _get_analogsignal_chunk(self, i_stop: int = None, stream_index: int = None, channel_indexes: np.ndarray | list | slice = None): + ''' + Read a chunk of signal from the memmap + ''' if block_index >= self.header['nb_block']: raise IndexError(f"Block Index out of range. 
There are {self.header['nb_block']} blocks in the file") if seg_index >= self.header['nb_segment'][block_index]: @@ -844,6 +905,9 @@ def _get_analogsignal_chunk(self, return data def _segment_t_start(self, block_index: int, seg_index: int): + ''' + Get start time for a given segment + ''' all_starts = [] for block_index in range(self.header['nb_block']): bl_annotation = self.raw_annotations["blocks"][block_index] @@ -856,6 +920,9 @@ def _segment_t_start(self, block_index: int, seg_index: int): return all_starts[block_index][seg_index] def _segment_t_stop(self, block_index: int, seg_index: int): + ''' + Get stop time for a given segment + ''' all_stops = [] for block_index in range(self.header['nb_block']): bl_annotation = self.raw_annotations["blocks"][block_index] @@ -868,6 +935,9 @@ def _segment_t_stop(self, block_index: int, seg_index: int): return all_stops[block_index][seg_index] def _get_signal_size(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): + ''' + Get the maximum number of samples in a channel for a given stream and given segment + ''' return max(self.get_nr_samples(block_index = block_index, seg_index = seg_index, stream_index = stream_index)) @@ -887,6 +957,9 @@ def _rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype return None def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0): + ''' + Get the number of events for a given segment and event channel + ''' return len([event for event in self.events if (event['block_index'] == block_index and event['seg_index'] == seg_index and event['type'] == str(event_channel_index))]) @@ -894,6 +967,9 @@ def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_i def _get_event_timestamps( self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0, t_start: float = None, t_stop: float = None ): + ''' + Get timestamps of all events for a given segment and event channel. 
Optionally, provide a time range + ''' events = [event for event in self.events if event['type'] == str(event_channel_index) and event['seg_index'] == seg_index] timestamp = np.array([event['timestamp'] for event in events], dtype="float64") durations = np.array([event['duration'] for event in events], dtype="float64") @@ -917,6 +993,9 @@ def _rescale_epoch_duration(self, raw_duration: np.ndarray, dtype: np.dtype, eve return durations def _get_index_instances(self, id_str = '', tag = ''): + ''' + Return the main index information for an id_string or a tag + ''' identifier = 'id_str' if tag: identifier = 'tag' @@ -936,9 +1015,15 @@ def _get_index_instances(self, id_str = '', tag = ''): return(idx_instance) def _get_analogsignal_buffer_description(self, block_index, seg_index, buffer_id): + ''' + Get the description of a signal buffer for a given segment + ''' return self._buffer_descriptions[block_index][seg_index][buffer_id] def read_as_dict(fid, dtype): + ''' + Read bytes from the given binary file and return the results as a dictinary + ''' info = dict() dt = np.dtype(dtype) h = np.frombuffer(fid.read(dt.itemsize), dt)[0] @@ -949,6 +1034,9 @@ def read_as_dict(fid, dtype): return info def read_as_list(fid, dtype): + ''' + Read bytes from the given binary file and return the results as a list + ''' dt = np.dtype(dtype) if dt.itemsize == 0: return [] @@ -957,12 +1045,18 @@ def read_as_list(fid, dtype): return h def _process_bytes(byte_data, data_type): + ''' + Concatenate list of byte data into a single string and decode string data + ''' is_list_of_binaries = (type(byte_data) == np.ndarray and type(byte_data[0]) == np.bytes_) byte_obj = b''.join(byte_data) if is_list_of_binaries else byte_data bytes_decoded = _decode_string(byte_obj) if data_type.kind == "S" or is_list_of_binaries else byte_obj return bytes_decoded def _decode_string(string): + ''' + Decode string data + ''' try: string = string.decode("utf8") except: @@ -973,18 +1067,32 @@ def _decode_string(string): def _convert_to_guid(hex_list, guid_format = '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'): + ''' + Shuffel around a list of hexadecimal numbers into the given guid_format + ''' dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list] return('{' + guid_format.format(*dec_list) + '}') -def _convert_to_date(data_float, origin = '30-12-1899'): #Set Origin to 1 day back to account for OLE considering 1900 as a leap year +def _convert_to_date(data_float, origin = '30-12-1899'): + ''' + Convert a OLE float to datetime. + Set Origin to 1 day back to account for OLE considering 1900 as a leap year + ''' return(datetime.strptime(origin, '%d-%m-%Y') + timedelta(seconds = int(data_float*24*60*60))) def _typecast(data, dtype_in = np.uint8, dtype_out = np.uint32): + ''' + Change the datatype of given data + ''' data = np.array(data, dtype = dtype_in) return(data.view(dtype_out)) def _transform_ts_properties(data, dtype): + ''' + For some timestream properties, if the list contains 1 floating-point number return it as a value. 
+ Else, paste all entries it into a single string + ''' cast_list = list(_typecast(data, dtype_out = dtype)) if dtype == np.float64: [cast_list] = cast_list @@ -993,11 +1101,17 @@ def _transform_ts_properties(data, dtype): return(_transform_char(cast_list)) def _transform_char(line): + ''' + paste all entries in a given list together + ''' if type(line) != list: line = [line] line = ''.join([chr(item) for item in line if chr(item) != '\x00']) return line def _read_ts_properties(data, offset, internal_offset, dtype): + ''' + Read timestream properties from some data, given an offset, and process the timestream properties + ''' offset_modifier = 8 if (dtype == np.float64) else 2 top_range = offset + internal_offset + offset_modifier value = _transform_ts_properties(data[(offset + internal_offset):top_range], dtype) @@ -1005,6 +1119,9 @@ def _read_ts_properties(data, offset, internal_offset, dtype): return(value, internal_offset) def _get_relevant_section(lengths_list, to_compare): + ''' + Get the section that contains the given sampling point + ''' try: segment = min([j for j, length in enumerate(lengths_list) if length > to_compare]) except ValueError: From 192c48fce51b6283e24d8b218035d0cb4b842047 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Fri, 4 Jul 2025 08:23:44 +0200 Subject: [PATCH 59/63] Make function into class methods --- neo/rawio/nicoletrawio.py | 359 +++++++++++++++++++------------------- 1 file changed, 180 insertions(+), 179 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 7f0c5c204..e523a2903 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -206,9 +206,9 @@ def _get_tags(self): with open(self.filename, "rb") as fid: fid.seek(172) - n_tags = read_as_list(fid, + n_tags = self.read_as_list(fid, [('n_tags', 'uint32')]) - tags = [read_as_dict(fid, + tags = [self.read_as_dict(fid, tags_structure) for _ in range(n_tags)] for entry in tags: try: @@ -232,7 +232,7 @@ def _get_qi(self): ] with open(self.filename, "rb") as fid: fid.seek(172208) - qi = read_as_dict(fid, + qi = self.read_as_dict(fid, qi_structure) self.qi = qi @@ -246,10 +246,10 @@ def _get_main_index(self): with open(self.filename, "rb") as fid: while current_index < self.qi['n_entries']: fid.seek(next_index_pointer) - nr_index = read_as_list(fid, + nr_index = self.read_as_list(fid, [('nr_index', 'uint64')] ) - var = read_as_list(fid, + var = self.read_as_list(fid, [('var', 'uint64', int(3*nr_index))]) for i in range(nr_index): main_index.append({ @@ -258,7 +258,7 @@ def _get_main_index(self): 'block_l' : int(var[3*(i)+2] % 2**32), 'section_l' : round(var[3*(i)+2]/(2**32)), }) - next_index_pointer = read_as_list(fid, + next_index_pointer = self.read_as_list(fid, [('next_index_pointer', 'uint64')]) current_index = current_index + (i + 1) self.main_index = main_index @@ -283,10 +283,10 @@ def _read_dynamic_packets(self): fid.seek(offset) for i in range(self.n_dynamic_packets): guid_offset = offset + (i+1)*48 - dynamic_packet = read_as_dict(fid, + dynamic_packet = self.read_as_dict(fid, dynamic_packet_structure) - dynamic_packet['date'] = _convert_to_date(dynamic_packet['date']) - guid_as_str = _convert_to_guid(dynamic_packet['guid_list']) + dynamic_packet['date'] = self._convert_to_date(dynamic_packet['date']) + guid_as_str = self._convert_to_guid(dynamic_packet['guid_list']) if guid_as_str in list(self.TAGS_DICT.keys()): id_str = self.TAGS_DICT[guid_as_str] else: @@ -319,7 +319,7 @@ def _get_dynamic_packets_data(self): read_length = stop_at - start_at 
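# Worked example of the stitching above, assuming start_at/stop_at bound the
# overlap of the requested packet with the current section (numbers are
# illustrative, not from a real file): a 300-byte packet that begins 100 bytes
# into a 250-byte section is served in two reads,
#     section 1: read_length = 250 - 100 = 150
#     section 2: read_length = 400 - 250 = 150
# and the chunks are concatenated in order into dynamic_packets[i]['data'].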
file_pos_start = current_instance['offset'] + start_at - internal_offset fid.seek(int(file_pos_start)) - data_part = read_as_list(fid, + data_part = self.read_as_list(fid, [('data', 'uint8', read_length)]) data = data + list(data_part) remaining_data_to_read = remaining_data_to_read - read_length @@ -340,28 +340,28 @@ def _get_patient_guid(self): ] with open(self.filename, "rb") as fid: fid.seek(idx_instance['offset']) - patient_info = read_as_dict(fid, + patient_info = self.read_as_dict(fid, patient_info_structure ) for i in range(patient_info['n_values']): - id_temp = read_as_list(fid, + id_temp = self.read_as_list(fid, [('value', 'uint64')]) if id_temp in [7, 8]: - value = read_as_list(fid, + value = self.read_as_list(fid, [('value', 'float64')]) - value = _convert_to_date(value) + value = self._convert_to_date(value) elif id_temp in [23, 24]: - value = read_as_list(fid, + value = self.read_as_list(fid, [('value', 'float64')]) else: value = 0 patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value if patient_info['n_bstr'] != 0: - str_setup = read_as_list(fid, + str_setup = self.read_as_list(fid, [('setup', 'uint64', int(patient_info['n_bstr']*2))]) for i in range(0, int(patient_info['n_bstr']*2), 2): id_temp = str_setup[i] - value = ''.join([read_as_list(fid, + value = ''.join([self.read_as_list(fid, [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value @@ -392,19 +392,19 @@ def _get_signal_properties(self): for instance in idx_instances: with open(self.filename, "rb") as fid: fid.seek(instance['offset']) - signal_structure = read_as_dict(fid, + signal_structure = self.read_as_dict(fid, signal_structure_segment) - unknown = read_as_list(fid, + unknown = self.read_as_list(fid, [('unknown', 'S1', 152)]) fid.seek(512,1) - n_idx = read_as_dict(fid, + n_idx = self.read_as_dict(fid, [('n_idx', 'uint16'), ('misc1', 'uint16', 3)]) for i in range(n_idx['n_idx']): - properties = read_as_dict(fid, + properties = self.read_as_dict(fid, signal_properties_segment) signal_properties.append(properties) - reserved = read_as_list(fid, + reserved = self.read_as_list(fid, [('reserved', 'S1', 256)]) self.signal_structure = signal_structure self.signal_properties = signal_properties @@ -426,13 +426,13 @@ def _get_channel_info(self): idx_instance = self._get_index_instances('CHANNELGUID')[0] with open(self.filename, "rb") as fid: fid.seek(idx_instance['offset']) - channel_structure = read_as_dict(fid, + channel_structure = self.read_as_dict(fid, channel_structure_structure[0]) fid.seek(152, 1) - channel_structure = channel_structure | read_as_dict(fid, + channel_structure = channel_structure | self.read_as_dict(fid, channel_structure_structure[1]) fid.seek(488,1) - n_index = read_as_list(fid, + n_index = self.read_as_list(fid, [('n_index', 'int32', 2)]) current_index = 0 for i in range(n_index[1]): @@ -443,7 +443,7 @@ def _get_channel_info(self): ('l_input_id', 'uint32'), ('l_input_setting_id', 'uint32'), ] - info = read_as_dict(fid, + info = self.read_as_dict(fid, channel_properties_structure) fid.seek(128, 1) if info['on']: @@ -453,7 +453,7 @@ def _get_channel_info(self): index_id = -1 info['index_id'] = index_id channel_properties.append(info) - reserved = read_as_list(fid, + reserved = self.read_as_list(fid, [('reserved', 'S1', 4)]) self.channel_structure = channel_structure self.channel_properties = channel_properties @@ -467,27 +467,27 @@ def _get_ts_properties_all(self): l_ts_packets = len(ts_packets) for ts_packet in ts_packets: 
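# Layout of a TSGUID packet, as unpacked below: the element count is a uint32
# at bytes 752-756, the per-channel records start at byte 760, and each record
# is 552 bytes long (label, sensors, filter cutoffs, sampling rate,
# resolution, mark/notch flags, offset).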
ts_properties = [] - elems = _typecast(ts_packet['data'][752:756])[0] - alloc = _typecast(ts_packet['data'][756:760])[0] + elems = self._typecast(ts_packet['data'][752:756])[0] + alloc = self._typecast(ts_packet['data'][756:760])[0] offset = 760 for i in range(elems): internal_offset = 0 top_range = (offset + self.TSLABELSIZE) - label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) + label = self._transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) internal_offset += 2*self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE - active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + active_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset = internal_offset + self.TSLABELSIZE; top_range = offset + internal_offset + 8 - ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + ref_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset += 64 - low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - resolution, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - mark, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - notch, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - eeg_offset, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + low_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + high_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + sampling_rate, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + resolution, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + mark, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + notch, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + eeg_offset, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) offset += 552 ts_properties.append({ 'label' : label, @@ -519,27 +519,27 @@ def _get_ts_properties(self, ts_packet_index = 0): if l_ts_packets > 1: warnings.warn(f'{l_ts_packets} TSinfo packets detected; using first instance for all segments. 
See documentation for info') ts_packet = ts_packets[ts_packet_index] - elems = _typecast(ts_packet['data'][752:756])[0] - alloc = _typecast(ts_packet['data'][756:760])[0] + elems = self._typecast(ts_packet['data'][752:756])[0] + alloc = self._typecast(ts_packet['data'][756:760])[0] offset = 760 for i in range(elems): internal_offset = 0 top_range = (offset + self.TSLABELSIZE) - label = _transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) + label = self._transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) internal_offset += 2*self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE - active_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + active_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset = internal_offset + self.TSLABELSIZE; top_range = offset + internal_offset + 8 - ref_sensor = _transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + ref_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset += 64; - low_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - high_cut, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - sampling_rate, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - resolution, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - mark, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - notch, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - eeg_offset, internal_offset = _read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + low_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + high_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + sampling_rate, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + resolution, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + mark, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + notch, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) + eeg_offset, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) offset += 552 ts_properties.append({ 'label' : label, @@ -566,10 +566,10 @@ def _get_segment_start_times(self): fid.seek(segment_instance['offset'], 0) for i in range(n_segments): segment_info = {} - segment_info['date_ole'] = read_as_list(fid, + segment_info['date_ole'] = self.read_as_list(fid, [('date', 'float64')]) fid.seek(8,1) - segment_info['duration'] = read_as_list(fid, + segment_info['duration'] = self.read_as_list(fid, [('duration', 'float64')]) fid.seek(128, 1) segment_info['ch_names'] = [info['label'] for info in self.ts_properties] @@ -592,7 +592,7 @@ def _get_events(self): ''' events = [] event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' - event_instances = _ensure_list(self._get_index_instances(tag = 'Events')) + event_instances = self._ensure_list(self._get_index_instances(tag = 'Events')) 
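# _ensure_list (a module-level helper until this patch, a method from here on)
# normalizes the lookup result so a single index instance and a list of them
# iterate the same way, e.g. _ensure_list({'offset': 0}) -> [{'offset': 0}].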
for instance in event_instances: offset = instance['offset'] with open(self.filename, "rb") as fid: @@ -601,9 +601,9 @@ def _get_events(self): ('len', 'uint64'), ] fid.seek(offset) - pkt = read_as_dict(fid, + pkt = self.read_as_dict(fid, pkt_structure) - pkt['guid'] = _convert_to_guid(pkt['guid']) + pkt['guid'] = self._convert_to_guid(pkt['guid']) n_events = 0 while (pkt['guid'] == event_packet_guid): event_structure = [ @@ -620,24 +620,24 @@ def _get_events(self): n_events += 1 try: fid.seek(8, 1) - event = read_as_dict(fid, + event = self.read_as_dict(fid, event_structure[0]) fid.seek(48, 1) - event = event | read_as_dict(fid, + event = event | self.read_as_dict(fid, event_structure[1]) fid.seek(16, 1) - event = event | read_as_dict(fid, + event = event | self.read_as_dict(fid, event_structure[2]) event['date'] = self._convert_ole_to_datetime(event['date_ole'], event['date_fraction']) event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds() - event['guid'] = _convert_to_guid(event['guid']) + event['guid'] = self._convert_to_guid(event['guid']) try: id_str = self.HC_EVENT[event['guid']] except: id_str = 'UNKNOWN' if id_str == 'Annotation' or id_str == 'Event Comment': fid.seek(31, 1) - annotation = read_as_list(fid, + annotation = self.read_as_list(fid, [('annotation', 'S2', event['text_length'])]) else: annotation = '' @@ -657,9 +657,9 @@ def _get_events(self): break offset += int(pkt['len']) fid.seek(offset) - pkt = read_as_dict(fid, + pkt = self.read_as_dict(fid, pkt_structure) - pkt['guid'] = _convert_to_guid(pkt['guid']) + pkt['guid'] = self._convert_to_guid(pkt['guid']) self.events = events pass @@ -687,14 +687,14 @@ def _get_montage(self): ], ] fid.seek(int(montage_instances[0]['offset']) + 40) - montage_info = read_as_dict(fid, + montage_info = self.read_as_dict(fid, montage_info_structure[0]) fid.seek(640, 1) - montage_info = montage_info | read_as_dict(fid, + montage_info = montage_info | self.read_as_dict(fid, montage_info_structure[1]) for i in range(montage_info['n_derivations']): - montage = montage_info | read_as_dict(fid, + montage = montage_info | self.read_as_dict(fid, montage_info_structure[2]) fid.seek(264, 1) montages.append(montage) @@ -707,16 +707,16 @@ def _get_montage(self): [('color', 'uint32')], ] fid.seek(int(display_instances[0]['offset']) + 40) - display = read_as_dict(fid, + display = self.read_as_dict(fid, display_structure[0]) fid.seek(640, 1) - display = display | read_as_dict(fid, + display = display | self.read_as_dict(fid, display_structure[1]) if display['n_traces'] == montage_info['n_derivations']: for i in range(display['n_traces']): fid.seek(32, 1) montages[i]['disp_name'] = display['name'] - montages[i]['color'] = read_as_list(fid, + montages[i]['color'] = self.read_as_list(fid, display_structure[2]) else: print('Could not match montage derivations with display color table') @@ -760,7 +760,7 @@ def _get_buffer_descriptions(self): [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == '0'] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] section_lengths = [0] + list(np.cumsum([int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections])) - first_section_for_seg = _get_relevant_section(section_lengths, skip_values) - 1 + first_section_for_seg = self._get_relevant_section(section_lengths, skip_values) - 1 offset = self.main_index[all_sections[first_section_for_seg]]['offset'] shape = (max(self.get_nr_samples(seg_index = seg_index)), 
segment['sampling_rates'].count(segment['sampling_rates'][0])) @@ -884,8 +884,8 @@ def _get_analogsignal_chunk(self, section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] cum_section_lengths = [0] + list(np.cumsum(section_lengths)) skip_values = cum_segment_duration[seg_index] * current_samplingrate - first_section_for_seg = _get_relevant_section(cum_section_lengths, skip_values) - 1 - last_section_for_seg = _get_relevant_section(cum_section_lengths, + first_section_for_seg = self._get_relevant_section(cum_section_lengths, skip_values) - 1 + last_section_for_seg = self._get_relevant_section(cum_section_lengths, current_samplingrate* self.segments_properties[seg_index]['duration'].total_seconds()) - 1 + first_section_for_seg use_sections = all_sections[first_section_for_seg:last_section_for_seg] @@ -1020,119 +1020,120 @@ def _get_analogsignal_buffer_description(self, block_index, seg_index, buffer_id ''' return self._buffer_descriptions[block_index][seg_index][buffer_id] -def read_as_dict(fid, dtype): - ''' - Read bytes from the given binary file and return the results as a dictinary - ''' - info = dict() - dt = np.dtype(dtype) - h = np.frombuffer(fid.read(dt.itemsize), dt)[0] - for k in dt.names: - v = h[k] - v = _process_bytes(v, dt[k]) - info[k] = v - return info + def read_as_dict(self, fid, dtype): + ''' + Read bytes from the given binary file and return the results as a dictinary + ''' + info = dict() + dt = np.dtype(dtype) + h = np.frombuffer(fid.read(dt.itemsize), dt)[0] + for k in dt.names: + v = h[k] + v = self._process_bytes(v, dt[k]) + info[k] = v + return info -def read_as_list(fid, dtype): - ''' - Read bytes from the given binary file and return the results as a list - ''' - dt = np.dtype(dtype) - if dt.itemsize == 0: - return [] - h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0] - h = _process_bytes(h, dt[0]) - return h - -def _process_bytes(byte_data, data_type): - ''' - Concatenate list of byte data into a single string and decode string data - ''' - is_list_of_binaries = (type(byte_data) == np.ndarray and type(byte_data[0]) == np.bytes_) - byte_obj = b''.join(byte_data) if is_list_of_binaries else byte_data - bytes_decoded = _decode_string(byte_obj) if data_type.kind == "S" or is_list_of_binaries else byte_obj - return bytes_decoded + def read_as_list(self, fid, dtype): + ''' + Read bytes from the given binary file and return the results as a list + ''' + dt = np.dtype(dtype) + if dt.itemsize == 0: + return [] + h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0] + h = self._process_bytes(h, dt[0]) + return h + + def _process_bytes(self, byte_data, data_type): + ''' + Concatenate list of byte data into a single string and decode string data + ''' + is_list_of_binaries = (type(byte_data) == np.ndarray and type(byte_data[0]) == np.bytes_) + byte_obj = b''.join(byte_data) if is_list_of_binaries else byte_data + bytes_decoded = self._decode_string(byte_obj) if data_type.kind == "S" or is_list_of_binaries else byte_obj + return bytes_decoded -def _decode_string(string): - ''' - Decode string data - ''' - try: - string = string.decode("utf8") - except: - string = string.decode('latin_1') - string = string.replace("\x03", "") - string = string.replace("\x00", "") - return string + def _decode_string(self, string): + ''' + Decode string data + ''' + try: + string = string.decode("utf8") + except: + string = string.decode('latin_1') + string = string.replace("\x03", "") + string = string.replace("\x00", "") + return string 
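The `_convert_to_guid` helper that follows implements the mixed-endian layout Windows uses for GUIDs: the first three fields are stored little-endian, the remainder big-endian. The standard library encodes the same reordering, which makes a handy cross-check; a sketch, with `raw` standing in for one of the 16-byte guid fields read elsewhere in this module:

    import uuid

    raw = bytes(range(16))  # illustrative bytes, not from a real file
    guid = "{" + str(uuid.UUID(bytes_le=raw)).upper() + "}"
    # -> '{03020100-0504-0706-0809-0A0B0C0D0E0F}', the same ordering as the
    #    hand-rolled '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'

If the hand-rolled formatter is ever dropped, `uuid.UUID(bytes_le=...)` is the drop-in replacement.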
-def _convert_to_guid(hex_list, - guid_format = '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'): - ''' - Shuffel around a list of hexadecimal numbers into the given guid_format - ''' - dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list] - return('{' + guid_format.format(*dec_list) + '}') + def _convert_to_guid(self, + hex_list, + guid_format = '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'): + ''' + Shuffel around a list of hexadecimal numbers into the given guid_format + ''' + dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list] + return('{' + guid_format.format(*dec_list) + '}') -def _convert_to_date(data_float, origin = '30-12-1899'): - ''' - Convert a OLE float to datetime. - Set Origin to 1 day back to account for OLE considering 1900 as a leap year - ''' - return(datetime.strptime(origin, '%d-%m-%Y') - + timedelta(seconds = int(data_float*24*60*60))) + def _convert_to_date(self, data_float, origin = '30-12-1899'): + ''' + Convert a OLE float to datetime. + Set Origin to 1 day back to account for OLE considering 1900 as a leap year + ''' + return(datetime.strptime(origin, '%d-%m-%Y') + + timedelta(seconds = int(data_float*24*60*60))) -def _typecast(data, dtype_in = np.uint8, dtype_out = np.uint32): - ''' - Change the datatype of given data - ''' - data = np.array(data, dtype = dtype_in) - return(data.view(dtype_out)) + def _typecast(self, data, dtype_in = np.uint8, dtype_out = np.uint32): + ''' + Change the datatype of given data + ''' + data = np.array(data, dtype = dtype_in) + return(data.view(dtype_out)) -def _transform_ts_properties(data, dtype): - ''' - For some timestream properties, if the list contains 1 floating-point number return it as a value. - Else, paste all entries it into a single string - ''' - cast_list = list(_typecast(data, dtype_out = dtype)) - if dtype == np.float64: - [cast_list] = cast_list - return(cast_list) - else: - return(_transform_char(cast_list)) - -def _transform_char(line): - ''' - paste all entries in a given list together - ''' - if type(line) != list: line = [line] - line = ''.join([chr(item) for item in line if chr(item) != '\x00']) - return line + def _transform_ts_properties(self, data, dtype): + ''' + For some timestream properties, if the list contains 1 floating-point number return it as a value. 
+ Else, paste all entries it into a single string + ''' + cast_list = list(self._typecast(data, dtype_out = dtype)) + if dtype == np.float64: + [cast_list] = cast_list + return(cast_list) + else: + return(self._transform_char(cast_list)) + + def _transform_char(self, line): + ''' + paste all entries in a given list together + ''' + if type(line) != list: line = [line] + line = ''.join([chr(item) for item in line if chr(item) != '\x00']) + return line -def _read_ts_properties(data, offset, internal_offset, dtype): - ''' - Read timestream properties from some data, given an offset, and process the timestream properties - ''' - offset_modifier = 8 if (dtype == np.float64) else 2 - top_range = offset + internal_offset + offset_modifier - value = _transform_ts_properties(data[(offset + internal_offset):top_range], dtype) - internal_offset += offset_modifier - return(value, internal_offset) + def _read_ts_properties(self, data, offset, internal_offset, dtype): + ''' + Read timestream properties from some data, given an offset, and process the timestream properties + ''' + offset_modifier = 8 if (dtype == np.float64) else 2 + top_range = offset + internal_offset + offset_modifier + value = self._transform_ts_properties(data[(offset + internal_offset):top_range], dtype) + internal_offset += offset_modifier + return(value, internal_offset) -def _get_relevant_section(lengths_list, to_compare): - ''' - Get the section that contains the given sampling point - ''' - try: - segment = min([j for j, length in enumerate(lengths_list) if length > to_compare]) - except ValueError: - segment = len(lengths_list) - return(segment) + def _get_relevant_section(self, lengths_list, to_compare): + ''' + Get the section that contains the given sampling point + ''' + try: + segment = min([j for j, length in enumerate(lengths_list) if length > to_compare]) + except ValueError: + segment = len(lengths_list) + return(segment) -def _ensure_list(output): - """ - Ensure the output is a list. If it is a single element, wrap it in a list. - If it is already a list, return it as is. - """ - if not isinstance(output, list): - return [output] - return output \ No newline at end of file + def _ensure_list(self, output): + """ + Ensure the output is a list. If it is a single element, wrap it in a list. + If it is already a list, return it as is. + """ + if not isinstance(output, list): + return [output] + return output \ No newline at end of file From cd5e3fd19c9d07bce8deafe19e7f6481d52b7338 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 8 Jul 2025 16:48:08 +0200 Subject: [PATCH 60/63] Read all ts properties --- neo/rawio/nicoletrawio.py | 61 ++++----------------------------------- 1 file changed, 6 insertions(+), 55 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index e523a2903..119802ae2 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -458,26 +458,25 @@ def _get_channel_info(self): self.channel_structure = channel_structure self.channel_properties = channel_properties - def _get_ts_properties_all(self): + def _get_ts_properties(self, ts_packet_index = 0): ''' - DEPECRATED + Read properties of every timestream. 
+ Currently, only the first instance of the timestream is used for every segment ''' ts_packets_properties = [] ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] - l_ts_packets = len(ts_packets) for ts_packet in ts_packets: ts_properties = [] elems = self._typecast(ts_packet['data'][752:756])[0] - alloc = self._typecast(ts_packet['data'][756:760])[0] offset = 760 - for i in range(elems): + for _ in range(elems): internal_offset = 0 top_range = (offset + self.TSLABELSIZE) label = self._transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) internal_offset += 2*self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE active_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) - internal_offset = internal_offset + self.TSLABELSIZE; + internal_offset = internal_offset + self.TSLABELSIZE top_range = offset + internal_offset + 8 ref_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) internal_offset += 64 @@ -504,57 +503,9 @@ def _get_ts_properties_all(self): ts_packets_properties.append(ts_properties) self.ts_packets = ts_packets self.ts_packets_properties = ts_packets_properties + self.ts_properties = ts_packets_properties[ts_packet_index] pass - def _get_ts_properties(self, ts_packet_index = 0): - ''' - Read properties of every timestream. - So far, only the first instance of the timestream is used for every segment - ''' - ts_properties = [] - ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] - l_ts_packets = len(ts_packets) - self.ts_packets = ts_packets - if l_ts_packets > 0: - if l_ts_packets > 1: - warnings.warn(f'{l_ts_packets} TSinfo packets detected; using first instance for all segments. 
See documentation for info') - ts_packet = ts_packets[ts_packet_index] - elems = self._typecast(ts_packet['data'][752:756])[0] - alloc = self._typecast(ts_packet['data'][756:760])[0] - offset = 760 - for i in range(elems): - internal_offset = 0 - top_range = (offset + self.TSLABELSIZE) - label = self._transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) - internal_offset += 2*self.TSLABELSIZE - top_range = offset + internal_offset + self.LABELSIZE - active_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) - internal_offset = internal_offset + self.TSLABELSIZE; - top_range = offset + internal_offset + 8 - ref_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) - internal_offset += 64; - low_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - high_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - sampling_rate, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - resolution, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - mark, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - notch, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - eeg_offset, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - offset += 552 - ts_properties.append({ - 'label' : label, - 'active_sensor' : active_sensor, - 'ref_sensor' : ref_sensor, - 'low_cut' : low_cut, - 'high_cut' : high_cut, - 'sampling_rate' : sampling_rate, - 'resolution' : resolution, - 'notch' : notch, - 'mark' : mark, - 'eeg_offset' : eeg_offset, - }) - self.ts_properties = ts_properties - def _get_segment_start_times(self): ''' Get the start and stop times and the duration of each segment From 67b714946e51dbb890076b31b2674fcb90693a78 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Thu, 10 Jul 2025 17:25:17 +0200 Subject: [PATCH 61/63] Change create signal channels to use channel_properties as basis and pull information from signal and timestream properties as needed --- neo/rawio/nicoletrawio.py | 51 +++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 119802ae2..ac1192665 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -179,7 +179,7 @@ def _parse_header(self): self._extract_header_information() self.header = {} self.header["nb_block"] = 1 - self.header["nb_segment"] = [len(self.segments_properties)] + self.header["nb_segment"] = [int(self._get_index_instances('SegmentStream')[0]['section_l']/152)] self.header["signal_buffers"] = np.array(['Signals', '0'], dtype=_signal_buffer_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) @@ -191,6 +191,11 @@ def _parse_header(self): self.header["event_channels"] = np.array([("Events", "0", "event"), ("Epochs", "1", "epoch")], dtype = _event_channel_dtype) + + self._get_segment_start_times() + self._get_events() + self._get_montage() + self._get_raw_signal() self._generate_minimal_annotations() self._generate_additional_annotations() self._get_buffer_descriptions() @@ -519,17 +524,15 @@ def _get_segment_start_times(self): segment_info = {} 
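# The float read next is an OLE timestamp: days since 1899-12-30 (the origin
# sits one day early because OLE treats 1900 as a leap year). Illustrative
# value: 45000.5 days -> 1899-12-30 + 45000 days + 12 h = 2023-03-15 12:00:00,
# made timezone-aware (UTC) by _convert_ole_to_datetime.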
segment_info['date_ole'] = self.read_as_list(fid, [('date', 'float64')]) + date_str = self._convert_ole_to_datetime(segment_info['date_ole']) fid.seek(8,1) segment_info['duration'] = self.read_as_list(fid, [('duration', 'float64')]) fid.seek(128, 1) - segment_info['ch_names'] = [info['label'] for info in self.ts_properties] - segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] - segment_info['sampling_rates'] = [info['sampling_rate'] for info in self.ts_properties] - segment_info['scale'] = [info['resolution'] for info in self.ts_properties] - date_str = self._convert_ole_to_datetime(segment_info['date_ole']) - start_date = date_str.date() - start_time = date_str.time() + segment_info['ch_names'] = [channel[0] for channel in self.header['signal_channels']] + #segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] + segment_info['sampling_rates'] = [channel[2] for channel in self.header['signal_channels']] + segment_info['scale'] = [channel[5] for channel in self.header['signal_channels']] segment_info['date'] = date_str segment_info['start_date'] = date_str.date() segment_info['start_time'] = date_str.time() @@ -699,9 +702,6 @@ def _get_raw_signal(self): def _get_buffer_descriptions(self): ''' Get the descriptions of raw signal buffers - - TODO: File offset - TODO: Support for multiple signal streams ''' buffer_id = 0 self._buffer_descriptions = {0: {}} @@ -738,10 +738,6 @@ def _extract_header_information(self): self._get_signal_properties() self._get_channel_info() self._get_ts_properties() - self._get_segment_start_times() - self._get_events() - self._get_montage() - self._get_raw_signal() def _create_signal_channels(self, dtype): ''' @@ -750,22 +746,24 @@ def _create_signal_channels(self, dtype): signal_channels = [] signal_streams = {} stream_id = 0 - for i, timestream in enumerate(self.ts_properties): - signal = next((item for item in self.signal_properties if item["name"] == timestream['label'].split('-')[0]), None) + for i, channel in enumerate(self.channel_properties): + signal = next((item for item in self.signal_properties if item['name'] == channel['sensor']), None) + timestream = next((item for item in self.ts_properties if item['label'] == channel['sensor']), None) if signal is None: continue - if timestream['sampling_rate'] not in signal_streams.keys(): - signal_streams[timestream['sampling_rate']] = stream_id + if channel['sampling_rate'] not in signal_streams.keys(): + signal_streams[channel['sampling_rate']] = stream_id stream_id += 1 + channel['sampling_rate'] signal_channels.append(( - timestream['label'].split('-')[0], + channel['sensor'], i, - int(timestream['sampling_rate']), + int(channel['sampling_rate']), 'int16', signal['transducer'], timestream['resolution'], timestream['eeg_offset'], - signal_streams[timestream['sampling_rate']], + signal_streams[channel['sampling_rate']], '0')) self.signal_streams = signal_streams return np.array(signal_channels, dtype = dtype) @@ -804,8 +802,10 @@ def _get_analogsignal_chunk(self, ''' if block_index >= self.header['nb_block']: raise IndexError(f"Block Index out of range. There are {self.header['nb_block']} blocks in the file") + if seg_index >= self.header['nb_segment'][block_index]: raise IndexError(f"Segment Index out of range. 
There are {self.header['nb_segment'][block_index]} segments for block {block_index}") + if channel_indexes is None: channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] elif isinstance(channel_indexes, slice): @@ -818,16 +818,19 @@ def _get_analogsignal_chunk(self, raise IndexError("Channel Indices cannot be negative") if any(channel_indexes >= self.header['signal_channels'].shape[0]): raise IndexError("Channel Indices out of range") + if i_start is None: i_start = 0 + if i_stop is None: i_stop = max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)) + if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)): #Get the maximum number of samples for the respective sampling rate raise IndexError("Start or Stop Index out of bounds") - current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][channel_indexes[0]] #Non signal-stream specific, just take the sampling rate of the first channel + cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) data = np.empty([i_stop - i_start, len(channel_indexes)]) - for i, channel_index in enumerate(channel_indexes): + for i in range(len(channel_indexes)): current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] multiplicator = self.segments_properties[seg_index]['scale'][i] [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] From e50fb53f9e776ac0f6ecadf7a64fff583bc750e9 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Mon, 14 Jul 2025 16:34:42 +0200 Subject: [PATCH 62/63] Reformatting with black --- neo/io/nicoletio.py | 2 +- neo/rawio/nicoletrawio.py | 1386 ++++++++++++----------- neo/test/iotest/test_nicoletio.py | 14 +- neo/test/rawiotest/test_nicoletrawio.py | 5 +- 4 files changed, 727 insertions(+), 680 deletions(-) diff --git a/neo/io/nicoletio.py b/neo/io/nicoletio.py index a14944080..1d872c4d8 100644 --- a/neo/io/nicoletio.py +++ b/neo/io/nicoletio.py @@ -27,4 +27,4 @@ class NicoletIO(NicoletRawIO, BaseFromRaw): def __init__(self, filename=""): NicoletRawIO.__init__(self, filename=filename) - BaseFromRaw.__init__(self, filename) \ No newline at end of file + BaseFromRaw.__init__(self, filename) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index ac1192665..9753ae9fd 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -24,174 +24,173 @@ class NicoletRawIO(BaseRawIO): - ''' + """ The Class to read in .e files produced by the NicoletOne EEG-System. 
- + Parameters ---------- filename: str, default : None The .e file to be loaded - ''' + """ extensions = ["e"] rawmode = "one-file" - + LABELSIZE = 32 TSLABELSIZE = 64 UNITSIZE = 16 ITEMNAMESIZE = 64 TAGS_DICT = { - 'ExtraDataTags' : 'ExtraDataTags', - 'SegmentStream' : 'SegmentStream', - 'DataStream' : 'DataStream', - 'InfoChangeStream' : 'InfoChangeStream', - 'InfoGuids' : 'InfoGuids', - '{A271CCCB-515D-4590-B6A1-DC170C8D6EE2}' : 'TSGUID', - '{8A19AA48-BEA0-40D5-B89F-667FC578D635}' : 'DERIVATIONGUID', - '{F824D60C-995E-4D94-9578-893C755ECB99}' : 'FILTERGUID', - '{02950361-35BB-4A22-9F0B-C78AAA5DB094}' : 'DISPLAYGUID', - '{8E94EF21-70F5-11D3-8F72-00105A9AFD56}' : 'FILEINFOGUID', - '{E4138BC0-7733-11D3-8685-0050044DAAB1}' : 'SRINFOGUID', - '{C728E565-E5A0-4419-93D2-F6CFC69F3B8F}' : 'EVENTTYPEINFOGUID', - '{D01B34A0-9DBD-11D3-93D3-00500400C148}' : 'AUDIOINFOGUID', - '{BF7C95EF-6C3B-4E70-9E11-779BFFF58EA7}' : 'CHANNELGUID', - '{2DEB82A1-D15F-4770-A4A4-CF03815F52DE}' : 'INPUTGUID', - '{5B036022-2EDC-465F-86EC-C0A4AB1A7A91}' : 'INPUTSETTINGSGUID', - '{99A636F2-51F7-4B9D-9569-C7D45058431A}' : 'PHOTICGUID', - '{55C5E044-5541-4594-9E35-5B3004EF7647}' : 'ERRORGUID', - '{223A3CA0-B5AC-43FB-B0A8-74CF8752BDBE}' : 'VIDEOGUID', - '{0623B545-38BE-4939-B9D0-55F5E241278D}' : 'DETECTIONPARAMSGUID', - '{CE06297D-D9D6-4E4B-8EAC-305EA1243EAB}' : 'PAGEGUID', - '{782B34E8-8E51-4BB9-9701-3227BB882A23}' : 'ACCINFOGUID', - '{3A6E8546-D144-4B55-A2C7-40DF579ED11E}' : 'RECCTRLGUID', - '{D046F2B0-5130-41B1-ABD7-38C12B32FAC3}' : 'GUID TRENDINFOGUID', - '{CBEBA8E6-1CDA-4509-B6C2-6AC2EA7DB8F8}' : 'HWINFOGUID', - '{E11C4CBA-0753-4655-A1E9-2B2309D1545B}' : 'VIDEOSYNCGUID', - '{B9344241-7AC1-42B5-BE9B-B7AFA16CBFA5}' : 'SLEEPSCOREINFOGUID', - '{15B41C32-0294-440E-ADFF-DD8B61C8B5AE}' : 'FOURIERSETTINGSGUID', - '{024FA81F-6A83-43C8-8C82-241A5501F0A1}' : 'SPECTRUMGUID', - '{8032E68A-EA3E-42E8-893E-6E93C59ED515}' : 'SIGNALINFOGUID', - '{30950D98-C39C-4352-AF3E-CB17D5B93DED}' : 'SENSORINFOGUID', - '{F5D39CD3-A340-4172-A1A3-78B2CDBCCB9F}' : 'DERIVEDSIGNALINFOGUID', - '{969FBB89-EE8E-4501-AD40-FB5A448BC4F9}' : 'ARTIFACTINFOGUID', - '{02948284-17EC-4538-A7FA-8E18BD65E167}' : 'STUDYINFOGUID', - '{D0B3FD0B-49D9-4BF0-8929-296DE5A55910}' : 'PATIENTINFOGUID', - '{7842FEF5-A686-459D-8196-769FC0AD99B3}' : 'DOCUMENTINFOGUID', - '{BCDAEE87-2496-4DF4-B07C-8B4E31E3C495}' : 'USERSINFOGUID', - '{B799F680-72A4-11D3-93D3-00500400C148}' : 'EVENTGUID', - '{AF2B3281-7FCE-11D2-B2DE-00104B6FC652}' : 'SHORTSAMPLESGUID', - '{89A091B3-972E-4DA2-9266-261B186302A9}' : 'DELAYLINESAMPLESGUID', - '{291E2381-B3B4-44D1-BB77-8CF5C24420D7}' : 'GENERALSAMPLESGUID', - '{5F11C628-FCCC-4FDD-B429-5EC94CB3AFEB}' : 'FILTERSAMPLESGUID', - '{728087F8-73E1-44D1-8882-C770976478A2}' : 'DATEXDATAGUID', - '{35F356D9-0F1C-4DFE-8286-D3DB3346FD75}' : 'TESTINFOGUID', - } + "ExtraDataTags": "ExtraDataTags", + "SegmentStream": "SegmentStream", + "DataStream": "DataStream", + "InfoChangeStream": "InfoChangeStream", + "InfoGuids": "InfoGuids", + "{A271CCCB-515D-4590-B6A1-DC170C8D6EE2}": "TSGUID", + "{8A19AA48-BEA0-40D5-B89F-667FC578D635}": "DERIVATIONGUID", + "{F824D60C-995E-4D94-9578-893C755ECB99}": "FILTERGUID", + "{02950361-35BB-4A22-9F0B-C78AAA5DB094}": "DISPLAYGUID", + "{8E94EF21-70F5-11D3-8F72-00105A9AFD56}": "FILEINFOGUID", + "{E4138BC0-7733-11D3-8685-0050044DAAB1}": "SRINFOGUID", + "{C728E565-E5A0-4419-93D2-F6CFC69F3B8F}": "EVENTTYPEINFOGUID", + "{D01B34A0-9DBD-11D3-93D3-00500400C148}": "AUDIOINFOGUID", + "{BF7C95EF-6C3B-4E70-9E11-779BFFF58EA7}": "CHANNELGUID", + 
"{2DEB82A1-D15F-4770-A4A4-CF03815F52DE}": "INPUTGUID", + "{5B036022-2EDC-465F-86EC-C0A4AB1A7A91}": "INPUTSETTINGSGUID", + "{99A636F2-51F7-4B9D-9569-C7D45058431A}": "PHOTICGUID", + "{55C5E044-5541-4594-9E35-5B3004EF7647}": "ERRORGUID", + "{223A3CA0-B5AC-43FB-B0A8-74CF8752BDBE}": "VIDEOGUID", + "{0623B545-38BE-4939-B9D0-55F5E241278D}": "DETECTIONPARAMSGUID", + "{CE06297D-D9D6-4E4B-8EAC-305EA1243EAB}": "PAGEGUID", + "{782B34E8-8E51-4BB9-9701-3227BB882A23}": "ACCINFOGUID", + "{3A6E8546-D144-4B55-A2C7-40DF579ED11E}": "RECCTRLGUID", + "{D046F2B0-5130-41B1-ABD7-38C12B32FAC3}": "GUID TRENDINFOGUID", + "{CBEBA8E6-1CDA-4509-B6C2-6AC2EA7DB8F8}": "HWINFOGUID", + "{E11C4CBA-0753-4655-A1E9-2B2309D1545B}": "VIDEOSYNCGUID", + "{B9344241-7AC1-42B5-BE9B-B7AFA16CBFA5}": "SLEEPSCOREINFOGUID", + "{15B41C32-0294-440E-ADFF-DD8B61C8B5AE}": "FOURIERSETTINGSGUID", + "{024FA81F-6A83-43C8-8C82-241A5501F0A1}": "SPECTRUMGUID", + "{8032E68A-EA3E-42E8-893E-6E93C59ED515}": "SIGNALINFOGUID", + "{30950D98-C39C-4352-AF3E-CB17D5B93DED}": "SENSORINFOGUID", + "{F5D39CD3-A340-4172-A1A3-78B2CDBCCB9F}": "DERIVEDSIGNALINFOGUID", + "{969FBB89-EE8E-4501-AD40-FB5A448BC4F9}": "ARTIFACTINFOGUID", + "{02948284-17EC-4538-A7FA-8E18BD65E167}": "STUDYINFOGUID", + "{D0B3FD0B-49D9-4BF0-8929-296DE5A55910}": "PATIENTINFOGUID", + "{7842FEF5-A686-459D-8196-769FC0AD99B3}": "DOCUMENTINFOGUID", + "{BCDAEE87-2496-4DF4-B07C-8B4E31E3C495}": "USERSINFOGUID", + "{B799F680-72A4-11D3-93D3-00500400C148}": "EVENTGUID", + "{AF2B3281-7FCE-11D2-B2DE-00104B6FC652}": "SHORTSAMPLESGUID", + "{89A091B3-972E-4DA2-9266-261B186302A9}": "DELAYLINESAMPLESGUID", + "{291E2381-B3B4-44D1-BB77-8CF5C24420D7}": "GENERALSAMPLESGUID", + "{5F11C628-FCCC-4FDD-B429-5EC94CB3AFEB}": "FILTERSAMPLESGUID", + "{728087F8-73E1-44D1-8882-C770976478A2}": "DATEXDATAGUID", + "{35F356D9-0F1C-4DFE-8286-D3DB3346FD75}": "TESTINFOGUID", + } INFO_PROPS = [ - 'patientID', - 'firstName', - 'middleName', - 'lastName', - 'altID', - 'mothersMaidenName', - 'DOB', - 'DOD', - 'street', - 'sexID', - 'phone', - 'notes', - 'dominance', - 'siteID', - 'suffix', - 'prefix', - 'degree', - 'apartment', - 'city', - 'state', - 'country', - 'language', - 'height', - 'weight', - 'race', - 'religion', - 'maritalStatus', - ] + "patientID", + "firstName", + "middleName", + "lastName", + "altID", + "mothersMaidenName", + "DOB", + "DOD", + "street", + "sexID", + "phone", + "notes", + "dominance", + "siteID", + "suffix", + "prefix", + "degree", + "apartment", + "city", + "state", + "country", + "language", + "height", + "weight", + "race", + "religion", + "maritalStatus", + ] HC_EVENT = { - '{A5A95612-A7F8-11CF-831A-0800091B5BDA}' : 'Annotation', - '{A5A95646-A7F8-11CF-831A-0800091B5BDA}' : 'Seizure', - '{08784382-C765-11D3-90CE-00104B6F4F70}' : 'Format change', - '{6FF394DA-D1B8-46DA-B78F-866C67CF02AF}' : 'Photic', - '{481DFC97-013C-4BC5-A203-871B0375A519}' : 'Posthyperventilation', - '{725798BF-CD1C-4909-B793-6C7864C27AB7}' : 'Review progress', - '{96315D79-5C24-4A65-B334-E31A95088D55}' : 'Exam start', - '{A5A95608-A7F8-11CF-831A-0800091B5BDA}' : 'Hyperventilation', - '{A5A95617-A7F8-11CF-831A-0800091B5BDA}' : 'Impedance', - '{A5A95645-A7F8-11CF-831A-0800091B5BDA}' : 'Event Comment', - '{C3B68051-EDCF-418C-8D53-27077B92DE22}' : 'Spike', - '{99FFE0AA-B8F9-49E5-8390-8F072F4E00FC}' : 'EEG Check', - '{A5A9560A-A7F8-11CF-831A-0800091B5BDA}' : 'Print', - '{A5A95616-A7F8-11CF-831A-0800091B5BDA}' : 'Patient Event', - '{0DE05C94-7D03-47B9-864F-D586627EA891}' : 'Eyes closed', - '{583AA2C6-1F4E-47CF-A8D4-80C69EB8A5F3}' : 'Eyes open', - 
'{BAE4550A-8409-4289-9D8A-0D571A206BEC}' : 'Eating', - '{1F3A45A4-4D0F-4CC4-A43A-CAD2BC2D71F2}' : 'ECG', - '{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}' : 'Toilet', - '{A5A95611-A7F8-11CF-831A-0800091B5BDA}' : 'Fix Electrode', - '{08EC3F49-978D-4FE4-AE77-4C421335E5FF}' : 'Prune', - '{0A205CD4-1480-4F02-8AFF-4E4CD3B21078}' : 'Artifact', - '{A5A95609-A7F8-11CF-831A-0800091B5BDA}' : 'Print D', - '{A5A95637-A7F8-11CF-831A-0800091B5BDA}' : 'Tachycardia', - '{A0172995-4A24-401C-AB68-B585474E4C07}' : 'Seizure', - '{FF37D596-5703-43F9-A3F3-FA572C5D958C}' : 'Spike wave', - '{9DF82C59-6520-46E5-940F-16B1282F3DD6}' : 'EEG Check-theta li T', - '{06519E79-3C7B-4535-BA76-2AD76B6C65C8}' : 'Kom.-*', - '{CA4FCAD4-802E-4214-881A-E9C1C6549ABD}' : 'Arousal', - '{A5A95603-A7F8-11CF-831A-0800091B5BDA}' : 'Blink', - '{77A38C02-DCD4-4774-A47D-40437725B278}' : '+Anfallsmuster D-?', - '{32DB96B9-ED12-429A-B98D-27B2A82AD61F}' : 'spike wave', - '{24387A0E-AA04-40B4-82D4-6D58F24D59AB}' : 'Anfallsmuster', - '{A5A95636-A7F8-11CF-831A-0800091B5BDA}' : 'Bradycardia', - '{93A2CB2C-F420-4672-AA62-18989F768519}' : 'Detections Inactive', - '{8C5D49BA-7105-4355-BF6C-B35B9A4E594A}' : 'EEG-Check', - '{5A946B85-2E1D-46B8-9FB2-C0519C9BE681}' : 'Zaehneputzen', - '{48DA028A-5264-4620-AD03-C8787951E237}' : 'Bewegt', - '{C15CFF61-0326-4276-A08F-0BFC2354E7CC}' : 'Kratzt', - '{F4DD5874-23BA-4FFA-94DD-BE436BB6910F}' : 'Anfall', - '{A5A95610-A7F8-11CF-831A-0800091B5BDA}' : 'Flash', - '{8CB92AA7-A886-4013-8D52-6CD1C71C72B4}' : 'ETP', - } - - def __init__(self, filename = ""): + "{A5A95612-A7F8-11CF-831A-0800091B5BDA}": "Annotation", + "{A5A95646-A7F8-11CF-831A-0800091B5BDA}": "Seizure", + "{08784382-C765-11D3-90CE-00104B6F4F70}": "Format change", + "{6FF394DA-D1B8-46DA-B78F-866C67CF02AF}": "Photic", + "{481DFC97-013C-4BC5-A203-871B0375A519}": "Posthyperventilation", + "{725798BF-CD1C-4909-B793-6C7864C27AB7}": "Review progress", + "{96315D79-5C24-4A65-B334-E31A95088D55}": "Exam start", + "{A5A95608-A7F8-11CF-831A-0800091B5BDA}": "Hyperventilation", + "{A5A95617-A7F8-11CF-831A-0800091B5BDA}": "Impedance", + "{A5A95645-A7F8-11CF-831A-0800091B5BDA}": "Event Comment", + "{C3B68051-EDCF-418C-8D53-27077B92DE22}": "Spike", + "{99FFE0AA-B8F9-49E5-8390-8F072F4E00FC}": "EEG Check", + "{A5A9560A-A7F8-11CF-831A-0800091B5BDA}": "Print", + "{A5A95616-A7F8-11CF-831A-0800091B5BDA}": "Patient Event", + "{0DE05C94-7D03-47B9-864F-D586627EA891}": "Eyes closed", + "{583AA2C6-1F4E-47CF-A8D4-80C69EB8A5F3}": "Eyes open", + "{BAE4550A-8409-4289-9D8A-0D571A206BEC}": "Eating", + "{1F3A45A4-4D0F-4CC4-A43A-CAD2BC2D71F2}": "ECG", + "{B0BECF64-E669-42B1-AE20-97A8B0BBEE26}": "Toilet", + "{A5A95611-A7F8-11CF-831A-0800091B5BDA}": "Fix Electrode", + "{08EC3F49-978D-4FE4-AE77-4C421335E5FF}": "Prune", + "{0A205CD4-1480-4F02-8AFF-4E4CD3B21078}": "Artifact", + "{A5A95609-A7F8-11CF-831A-0800091B5BDA}": "Print D", + "{A5A95637-A7F8-11CF-831A-0800091B5BDA}": "Tachycardia", + "{A0172995-4A24-401C-AB68-B585474E4C07}": "Seizure", + "{FF37D596-5703-43F9-A3F3-FA572C5D958C}": "Spike wave", + "{9DF82C59-6520-46E5-940F-16B1282F3DD6}": "EEG Check-theta li T", + "{06519E79-3C7B-4535-BA76-2AD76B6C65C8}": "Kom.-*", + "{CA4FCAD4-802E-4214-881A-E9C1C6549ABD}": "Arousal", + "{A5A95603-A7F8-11CF-831A-0800091B5BDA}": "Blink", + "{77A38C02-DCD4-4774-A47D-40437725B278}": "+Anfallsmuster D-?", + "{32DB96B9-ED12-429A-B98D-27B2A82AD61F}": "spike wave", + "{24387A0E-AA04-40B4-82D4-6D58F24D59AB}": "Anfallsmuster", + "{A5A95636-A7F8-11CF-831A-0800091B5BDA}": "Bradycardia", + 
"{93A2CB2C-F420-4672-AA62-18989F768519}": "Detections Inactive", + "{8C5D49BA-7105-4355-BF6C-B35B9A4E594A}": "EEG-Check", + "{5A946B85-2E1D-46B8-9FB2-C0519C9BE681}": "Zaehneputzen", + "{48DA028A-5264-4620-AD03-C8787951E237}": "Bewegt", + "{C15CFF61-0326-4276-A08F-0BFC2354E7CC}": "Kratzt", + "{F4DD5874-23BA-4FFA-94DD-BE436BB6910F}": "Anfall", + "{A5A95610-A7F8-11CF-831A-0800091B5BDA}": "Flash", + "{8CB92AA7-A886-4013-8D52-6CD1C71C72B4}": "ETP", + } + + def __init__(self, filename=""): BaseRawIO.__init__(self) self.filename = filename - + def _source_name(self): - ''' + """ Returns path of the input file - ''' + """ return self.filename - + def _parse_header(self): - ''' + """ Parses the default header structure and generates some more annotions - ''' - self._extract_header_information() + """ + self._extract_header_information() self.header = {} self.header["nb_block"] = 1 - self.header["nb_segment"] = [int(self._get_index_instances('SegmentStream')[0]['section_l']/152)] - self.header["signal_buffers"] = np.array(['Signals', '0'], - dtype=_signal_buffer_dtype) + self.header["nb_segment"] = [int(self._get_index_instances("SegmentStream")[0]["section_l"] / 152)] + self.header["signal_buffers"] = np.array(["Signals", "0"], dtype=_signal_buffer_dtype) self.header["signal_channels"] = self._create_signal_channels(_signal_channel_dtype) - self.header["signal_streams"] = np.array([(f"Signals {signal_id}", signal_id, "0") - for signal_id in self.signal_streams.values()], - dtype=_signal_stream_dtype) - self.header["spike_channels"] = np.array([], - dtype= _spike_channel_dtype) - self.header["event_channels"] = np.array([("Events", "0", "event"), - ("Epochs", "1", "epoch")], - dtype = _event_channel_dtype) - + self.header["signal_streams"] = np.array( + [(f"Signals {signal_id}", signal_id, "0") for signal_id in self.signal_streams.values()], + dtype=_signal_stream_dtype, + ) + self.header["spike_channels"] = np.array([], dtype=_spike_channel_dtype) + self.header["event_channels"] = np.array( + [("Events", "0", "event"), ("Epochs", "1", "epoch")], dtype=_event_channel_dtype + ) + self._get_segment_start_times() self._get_events() self._get_montage() @@ -199,538 +198,543 @@ def _parse_header(self): self._generate_minimal_annotations() self._generate_additional_annotations() self._get_buffer_descriptions() - + def _get_tags(self): - ''' + """ Get tags that specify the index of different information within the main index pointers - ''' + """ tags_structure = [ - ('tag', 'S80'), - ('index', 'uint32'), - ] - + ("tag", "S80"), + ("index", "uint32"), + ] + with open(self.filename, "rb") as fid: fid.seek(172) - n_tags = self.read_as_list(fid, - [('n_tags', 'uint32')]) - tags = [self.read_as_dict(fid, - tags_structure) for _ in range(n_tags)] + n_tags = self.read_as_list(fid, [("n_tags", "uint32")]) + tags = [self.read_as_dict(fid, tags_structure) for _ in range(n_tags)] for entry in tags: try: - entry['id_str'] = self.TAGS_DICT[entry['tag']] + entry["id_str"] = self.TAGS_DICT[entry["tag"]] except KeyError: - entry['id_str'] = 'UNKNOWN' + entry["id_str"] = "UNKNOWN" self.n_tags = n_tags self.tags = tags def _get_qi(self): - ''' + """ Get QI that specifies the number of main index pointers, and where they are located within the file - ''' + """ qi_structure = [ - ('n_entries', 'uint32'), - ('misc1', 'uint32'), - ('index_idx', 'uint32'), - ('misc3', 'uint32'), - ('l_qi', 'uint64'), - ('first_idx', 'uint64', self.n_tags), - ] + ("n_entries", "uint32"), + ("misc1", "uint32"), + ("index_idx", "uint32"), + 
("misc3", "uint32"), + ("l_qi", "uint64"), + ("first_idx", "uint64", self.n_tags), + ] with open(self.filename, "rb") as fid: fid.seek(172208) - qi = self.read_as_dict(fid, - qi_structure) + qi = self.read_as_dict(fid, qi_structure) self.qi = qi - + def _get_main_index(self): - ''' + """ Get all main index pointers. They show where - ''' + """ main_index = [] current_index = 0 - next_index_pointer = self.qi['index_idx'] + next_index_pointer = self.qi["index_idx"] with open(self.filename, "rb") as fid: - while current_index < self.qi['n_entries']: + while current_index < self.qi["n_entries"]: fid.seek(next_index_pointer) - nr_index = self.read_as_list(fid, - [('nr_index', 'uint64')] - ) - var = self.read_as_list(fid, - [('var', 'uint64', int(3*nr_index))]) + nr_index = self.read_as_list(fid, [("nr_index", "uint64")]) + var = self.read_as_list(fid, [("var", "uint64", int(3 * nr_index))]) for i in range(nr_index): - main_index.append({ - 'section_idx' : int(var[3*(i)]), - 'offset' : int(var[3*(i)+1]), - 'block_l' : int(var[3*(i)+2] % 2**32), - 'section_l' : round(var[3*(i)+2]/(2**32)), - }) - next_index_pointer = self.read_as_list(fid, - [('next_index_pointer', 'uint64')]) + main_index.append( + { + "section_idx": int(var[3 * (i)]), + "offset": int(var[3 * (i) + 1]), + "block_l": int(var[3 * (i) + 2] % 2**32), + "section_l": round(var[3 * (i) + 2] / (2**32)), + } + ) + next_index_pointer = self.read_as_list(fid, [("next_index_pointer", "uint64")]) current_index = current_index + (i + 1) self.main_index = main_index - self.all_section_ids = [entry['section_idx'] for entry in main_index] - + self.all_section_ids = [entry["section_idx"] for entry in main_index] + def _read_dynamic_packets(self): - ''' - Read the packets which specify where the data is located - ''' + """ + Read the packets which specify where the data is located + """ dynamic_packet_structure = [ - ('guid_list', 'uint8', 16), - ('date', 'float64'), - ('datefrace', 'float64'), - ('internal_offset_start', 'uint64'), - ('packet_size', 'uint64'), - ] + ("guid_list", "uint8", 16), + ("date", "float64"), + ("datefrace", "float64"), + ("internal_offset_start", "uint64"), + ("packet_size", "uint64"), + ] dynamic_packets = [] - [dynamic_packets_instace] = self._get_index_instances(id_str = 'InfoChangeStream') - offset = dynamic_packets_instace['offset'] - self.n_dynamic_packets = int(dynamic_packets_instace['section_l']/48) + [dynamic_packets_instace] = self._get_index_instances(id_str="InfoChangeStream") + offset = dynamic_packets_instace["offset"] + self.n_dynamic_packets = int(dynamic_packets_instace["section_l"] / 48) with open(self.filename, "rb") as fid: fid.seek(offset) for i in range(self.n_dynamic_packets): - guid_offset = offset + (i+1)*48 - dynamic_packet = self.read_as_dict(fid, - dynamic_packet_structure) - dynamic_packet['date'] = self._convert_to_date(dynamic_packet['date']) - guid_as_str = self._convert_to_guid(dynamic_packet['guid_list']) + guid_offset = offset + (i + 1) * 48 + dynamic_packet = self.read_as_dict(fid, dynamic_packet_structure) + dynamic_packet["date"] = self._convert_to_date(dynamic_packet["date"]) + guid_as_str = self._convert_to_guid(dynamic_packet["guid_list"]) if guid_as_str in list(self.TAGS_DICT.keys()): id_str = self.TAGS_DICT[guid_as_str] else: - id_str = 'UNKNOWN' - dynamic_packet['offset'] = int(guid_offset) - dynamic_packet['guid'] = guid_as_str.replace('-', '').replace('{', '').replace('}', '') - dynamic_packet['guid_as_str'] = guid_as_str - dynamic_packet['id_str'] = id_str + id_str = 
"UNKNOWN" + dynamic_packet["offset"] = int(guid_offset) + dynamic_packet["guid"] = guid_as_str.replace("-", "").replace("{", "").replace("}", "") + dynamic_packet["guid_as_str"] = guid_as_str + dynamic_packet["id_str"] = id_str dynamic_packets.append(dynamic_packet) self.dynamic_packets = dynamic_packets def _get_dynamic_packets_data(self): - ''' + """ Read the data within the dynamic packets - ''' + """ with open(self.filename, "rb") as fid: for i in range(self.n_dynamic_packets): data = [] - dynamic_packet_instances = self._get_index_instances(tag = self.dynamic_packets[i]['guid_as_str']) + dynamic_packet_instances = self._get_index_instances(tag=self.dynamic_packets[i]["guid_as_str"]) internal_offset = 0 - remaining_data_to_read = int(self.dynamic_packets[i]['packet_size']) - current_target_start = int(self.dynamic_packets[i]['internal_offset_start']) + remaining_data_to_read = int(self.dynamic_packets[i]["packet_size"]) + current_target_start = int(self.dynamic_packets[i]["internal_offset_start"]) for j in range(len(dynamic_packet_instances)): current_instance = dynamic_packet_instances[j] - if ((internal_offset <= (current_target_start)) - & ((internal_offset + current_instance['section_l']) >= current_target_start)): + if (internal_offset <= (current_target_start)) & ( + (internal_offset + current_instance["section_l"]) >= current_target_start + ): start_at = current_target_start - stop_at = min(start_at + remaining_data_to_read, - internal_offset + current_instance['section_l']) + stop_at = min( + start_at + remaining_data_to_read, internal_offset + current_instance["section_l"] + ) read_length = stop_at - start_at - file_pos_start = current_instance['offset'] + start_at - internal_offset + file_pos_start = current_instance["offset"] + start_at - internal_offset fid.seek(int(file_pos_start)) - data_part = self.read_as_list(fid, - [('data', 'uint8', read_length)]) - data = data + list(data_part) + data_part = self.read_as_list(fid, [("data", "uint8", read_length)]) + data = data + list(data_part) remaining_data_to_read = remaining_data_to_read - read_length current_target_start = current_target_start + read_length - internal_offset = internal_offset + current_instance['section_l'] - self.dynamic_packets[i]['data'] = np.array(data) - + internal_offset = internal_offset + current_instance["section_l"] + self.dynamic_packets[i]["data"] = np.array(data) + def _get_patient_guid(self): - ''' + """ Read patient metadata - ''' - [idx_instance] = self._get_index_instances(id_str = 'PATIENTINFOGUID') + """ + [idx_instance] = self._get_index_instances(id_str="PATIENTINFOGUID") patient_info_structure = [ - ('guid', 'uint8', 16), - ('l_section', 'uint64'), - ('n_values', 'uint64'), - ('n_bstr', 'uint64'), - ] + ("guid", "uint8", 16), + ("l_section", "uint64"), + ("n_values", "uint64"), + ("n_bstr", "uint64"), + ] with open(self.filename, "rb") as fid: - fid.seek(idx_instance['offset']) - patient_info = self.read_as_dict(fid, - patient_info_structure - ) - for i in range(patient_info['n_values']): - id_temp = self.read_as_list(fid, - [('value', 'uint64')]) + fid.seek(idx_instance["offset"]) + patient_info = self.read_as_dict(fid, patient_info_structure) + for i in range(patient_info["n_values"]): + id_temp = self.read_as_list(fid, [("value", "uint64")]) if id_temp in [7, 8]: - value = self.read_as_list(fid, - [('value', 'float64')]) + value = self.read_as_list(fid, [("value", "float64")]) value = self._convert_to_date(value) elif id_temp in [23, 24]: - value = self.read_as_list(fid, - [('value', 
'float64')]) + value = self.read_as_list(fid, [("value", "float64")]) else: value = 0 patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - if patient_info['n_bstr'] != 0: - str_setup = self.read_as_list(fid, - [('setup', 'uint64', int(patient_info['n_bstr']*2))]) - for i in range(0, int(patient_info['n_bstr']*2), 2): + if patient_info["n_bstr"] != 0: + str_setup = self.read_as_list(fid, [("setup", "uint64", int(patient_info["n_bstr"] * 2))]) + for i in range(0, int(patient_info["n_bstr"] * 2), 2): id_temp = str_setup[i] - value = ''.join([self.read_as_list(fid, - [('value', 'S2')]) for _ in range(int(str_setup[i + 1]) + 1)]).strip() + value = "".join( + [self.read_as_list(fid, [("value", "S2")]) for _ in range(int(str_setup[i + 1]) + 1)] + ).strip() patient_info[self.INFO_PROPS[int(id_temp) - 1]] = value - + for prop in self.INFO_PROPS: if prop not in patient_info.keys(): patient_info[prop] = None self.patient_info = patient_info - + def _get_signal_properties(self): - ''' + """ Get the properties for every signal channel - ''' + """ signal_properties_segment = [ - ('name', 'S2', self.LABELSIZE), - ('transducer', 'S2', self.UNITSIZE), - ('guid', 'uint8', 16), - ('bipolar', 'uint32'), - ('ac', 'uint32'), - ('high_filter', 'uint32'), - ('color', 'uint32'), - ] + ("name", "S2", self.LABELSIZE), + ("transducer", "S2", self.UNITSIZE), + ("guid", "uint8", 16), + ("bipolar", "uint32"), + ("ac", "uint32"), + ("high_filter", "uint32"), + ("color", "uint32"), + ] signal_properties = [] signal_structure_segment = [ - ('guid', 'uint8', 16), - ('name', 'S1', self.ITEMNAMESIZE), - ] - idx_instances = self._get_index_instances('SIGNALINFOGUID') + ("guid", "uint8", 16), + ("name", "S1", self.ITEMNAMESIZE), + ] + idx_instances = self._get_index_instances("SIGNALINFOGUID") for instance in idx_instances: with open(self.filename, "rb") as fid: - fid.seek(instance['offset']) - signal_structure = self.read_as_dict(fid, - signal_structure_segment) - unknown = self.read_as_list(fid, - [('unknown', 'S1', 152)]) - fid.seek(512,1) - n_idx = self.read_as_dict(fid, - [('n_idx', 'uint16'), - ('misc1', 'uint16', 3)]) - for i in range(n_idx['n_idx']): - properties = self.read_as_dict(fid, - signal_properties_segment) + fid.seek(instance["offset"]) + signal_structure = self.read_as_dict(fid, signal_structure_segment) + unknown = self.read_as_list(fid, [("unknown", "S1", 152)]) + fid.seek(512, 1) + n_idx = self.read_as_dict(fid, [("n_idx", "uint16"), ("misc1", "uint16", 3)]) + for i in range(n_idx["n_idx"]): + properties = self.read_as_dict(fid, signal_properties_segment) signal_properties.append(properties) - reserved = self.read_as_list(fid, - [('reserved', 'S1', 256)]) + reserved = self.read_as_list(fid, [("reserved", "S1", 256)]) self.signal_structure = signal_structure self.signal_properties = signal_properties pass - + def _get_channel_info(self): - ''' + """ Get the properties for every signal channel - ''' + """ channel_properties = [] - channel_structure_structure= [ - [('guid', 'uint8', 16), - ('name', 'S1', self.ITEMNAMESIZE), + channel_structure_structure = [ + [ + ("guid", "uint8", 16), + ("name", "S1", self.ITEMNAMESIZE), ], - [('reserved', 'uint8', 16), - ('device_id', 'uint8', 16), + [ + ("reserved", "uint8", 16), + ("device_id", "uint8", 16), ], - ] - idx_instance = self._get_index_instances('CHANNELGUID')[0] + ] + idx_instance = self._get_index_instances("CHANNELGUID")[0] with open(self.filename, "rb") as fid: - fid.seek(idx_instance['offset']) - channel_structure = self.read_as_dict(fid, - 
channel_structure_structure[0]) + fid.seek(idx_instance["offset"]) + channel_structure = self.read_as_dict(fid, channel_structure_structure[0]) fid.seek(152, 1) - channel_structure = channel_structure | self.read_as_dict(fid, - channel_structure_structure[1]) - fid.seek(488,1) - n_index = self.read_as_list(fid, - [('n_index', 'int32', 2)]) + channel_structure = channel_structure | self.read_as_dict(fid, channel_structure_structure[1]) + fid.seek(488, 1) + n_index = self.read_as_list(fid, [("n_index", "int32", 2)]) current_index = 0 for i in range(n_index[1]): channel_properties_structure = [ - ('sensor', 'S2', self.LABELSIZE), - ('sampling_rate', 'float64'), - ('on', 'uint32'), - ('l_input_id', 'uint32'), - ('l_input_setting_id', 'uint32'), - ] - info = self.read_as_dict(fid, - channel_properties_structure) + ("sensor", "S2", self.LABELSIZE), + ("sampling_rate", "float64"), + ("on", "uint32"), + ("l_input_id", "uint32"), + ("l_input_setting_id", "uint32"), + ] + info = self.read_as_dict(fid, channel_properties_structure) fid.seek(128, 1) - if info['on']: + if info["on"]: index_id = current_index current_index += 1 else: index_id = -1 - info['index_id'] = index_id + info["index_id"] = index_id channel_properties.append(info) - reserved = self.read_as_list(fid, - [('reserved', 'S1', 4)]) + reserved = self.read_as_list(fid, [("reserved", "S1", 4)]) self.channel_structure = channel_structure self.channel_properties = channel_properties - - def _get_ts_properties(self, ts_packet_index = 0): - ''' + + def _get_ts_properties(self, ts_packet_index=0): + """ Read properties of every timestream. Currently, only the first instance of the timestream is used for every segment - ''' + """ ts_packets_properties = [] - ts_packets = [packet for packet in self.dynamic_packets if packet['id_str'] == 'TSGUID'] + ts_packets = [packet for packet in self.dynamic_packets if packet["id_str"] == "TSGUID"] for ts_packet in ts_packets: ts_properties = [] - elems = self._typecast(ts_packet['data'][752:756])[0] + elems = self._typecast(ts_packet["data"][752:756])[0] offset = 760 for _ in range(elems): - internal_offset = 0 - top_range = (offset + self.TSLABELSIZE) - label = self._transform_ts_properties(ts_packet['data'][offset:top_range], np.uint16) - internal_offset += 2*self.TSLABELSIZE + internal_offset = 0 + top_range = offset + self.TSLABELSIZE + label = self._transform_ts_properties(ts_packet["data"][offset:top_range], np.uint16) + internal_offset += 2 * self.TSLABELSIZE top_range = offset + internal_offset + self.LABELSIZE - active_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + active_sensor = self._transform_ts_properties( + ts_packet["data"][(offset + internal_offset) : top_range], np.uint16 + ) internal_offset = internal_offset + self.TSLABELSIZE top_range = offset + internal_offset + 8 - ref_sensor = self._transform_ts_properties(ts_packet['data'][(offset + internal_offset):top_range], np.uint16) + ref_sensor = self._transform_ts_properties( + ts_packet["data"][(offset + internal_offset) : top_range], np.uint16 + ) internal_offset += 64 - low_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - high_cut, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - sampling_rate, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) - resolution, internal_offset = self._read_ts_properties(ts_packet['data'], 
offset, internal_offset, np.float64) - mark, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - notch, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.uint16) - eeg_offset, internal_offset = self._read_ts_properties(ts_packet['data'], offset, internal_offset, np.float64) + low_cut, internal_offset = self._read_ts_properties( + ts_packet["data"], offset, internal_offset, np.float64 + ) + high_cut, internal_offset = self._read_ts_properties( + ts_packet["data"], offset, internal_offset, np.float64 + ) + sampling_rate, internal_offset = self._read_ts_properties( + ts_packet["data"], offset, internal_offset, np.float64 + ) + resolution, internal_offset = self._read_ts_properties( + ts_packet["data"], offset, internal_offset, np.float64 + ) + mark, internal_offset = self._read_ts_properties(ts_packet["data"], offset, internal_offset, np.uint16) + notch, internal_offset = self._read_ts_properties(ts_packet["data"], offset, internal_offset, np.uint16) + eeg_offset, internal_offset = self._read_ts_properties( + ts_packet["data"], offset, internal_offset, np.float64 + ) offset += 552 - ts_properties.append({ - 'label' : label, - 'active_sensor' : active_sensor, - 'ref_sensor' : ref_sensor, - 'low_cut' : low_cut, - 'high_cut' : high_cut, - 'sampling_rate' : sampling_rate, - 'resolution' : resolution, - 'notch' : notch, - 'mark' : mark, - 'eeg_offset' : eeg_offset, - }) + ts_properties.append( + { + "label": label, + "active_sensor": active_sensor, + "ref_sensor": ref_sensor, + "low_cut": low_cut, + "high_cut": high_cut, + "sampling_rate": sampling_rate, + "resolution": resolution, + "notch": notch, + "mark": mark, + "eeg_offset": eeg_offset, + } + ) ts_packets_properties.append(ts_properties) - self.ts_packets = ts_packets + self.ts_packets = ts_packets self.ts_packets_properties = ts_packets_properties self.ts_properties = ts_packets_properties[ts_packet_index] pass - + def _get_segment_start_times(self): - ''' + """ Get the start and stop times and the duration of each segment - ''' + """ segments_properties = [] - [segment_instance] = self._get_index_instances('SegmentStream') - n_segments = int(segment_instance['section_l']/152) + [segment_instance] = self._get_index_instances("SegmentStream") + n_segments = int(segment_instance["section_l"] / 152) with open(self.filename, "rb") as fid: - fid.seek(segment_instance['offset'], 0) + fid.seek(segment_instance["offset"], 0) for i in range(n_segments): - segment_info = {} - segment_info['date_ole'] = self.read_as_list(fid, - [('date', 'float64')]) - date_str = self._convert_ole_to_datetime(segment_info['date_ole']) - fid.seek(8,1) - segment_info['duration'] = self.read_as_list(fid, - [('duration', 'float64')]) + segment_info = {} + segment_info["date_ole"] = self.read_as_list(fid, [("date", "float64")]) + date_str = self._convert_ole_to_datetime(segment_info["date_ole"]) + fid.seek(8, 1) + segment_info["duration"] = self.read_as_list(fid, [("duration", "float64")]) fid.seek(128, 1) - segment_info['ch_names'] = [channel[0] for channel in self.header['signal_channels']] - #segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] - segment_info['sampling_rates'] = [channel[2] for channel in self.header['signal_channels']] - segment_info['scale'] = [channel[5] for channel in self.header['signal_channels']] - segment_info['date'] = date_str - segment_info['start_date'] = date_str.date() - segment_info['start_time'] = date_str.time() - 
segment_info['duration'] = timedelta(seconds = segment_info['duration']) + segment_info["ch_names"] = [channel[0] for channel in self.header["signal_channels"]] + # segment_info['ref_names'] = [info['ref_sensor'] for info in self.ts_properties] + segment_info["sampling_rates"] = [channel[2] for channel in self.header["signal_channels"]] + segment_info["scale"] = [channel[5] for channel in self.header["signal_channels"]] + segment_info["date"] = date_str + segment_info["start_date"] = date_str.date() + segment_info["start_time"] = date_str.time() + segment_info["duration"] = timedelta(seconds=segment_info["duration"]) segments_properties.append(segment_info) self.segments_properties = segments_properties - + def _get_events(self): - ''' + """ Read all events - ''' + """ events = [] - event_packet_guid = '{B799F680-72A4-11D3-93D3-00500400C148}' - event_instances = self._ensure_list(self._get_index_instances(tag = 'Events')) + event_packet_guid = "{B799F680-72A4-11D3-93D3-00500400C148}" + event_instances = self._ensure_list(self._get_index_instances(tag="Events")) for instance in event_instances: - offset = instance['offset'] + offset = instance["offset"] with open(self.filename, "rb") as fid: pkt_structure = [ - ('guid', 'uint8', 16), - ('len', 'uint64'), - ] + ("guid", "uint8", 16), + ("len", "uint64"), + ] fid.seek(offset) - pkt = self.read_as_dict(fid, - pkt_structure) - pkt['guid'] = self._convert_to_guid(pkt['guid']) + pkt = self.read_as_dict(fid, pkt_structure) + pkt["guid"] = self._convert_to_guid(pkt["guid"]) n_events = 0 - while (pkt['guid'] == event_packet_guid): + while pkt["guid"] == event_packet_guid: event_structure = [ - [('date_ole', 'float64'), - ('date_fraction', 'float64'), - ('duration', 'float64'), - ], - [('user', 'S2', 12), - ('text_length', 'uint64'), - ('guid', 'uint8', 16), - ], - [('label', 'S2', 32)], - ] + [ + ("date_ole", "float64"), + ("date_fraction", "float64"), + ("duration", "float64"), + ], + [ + ("user", "S2", 12), + ("text_length", "uint64"), + ("guid", "uint8", 16), + ], + [("label", "S2", 32)], + ] n_events += 1 try: fid.seek(8, 1) - event = self.read_as_dict(fid, - event_structure[0]) + event = self.read_as_dict(fid, event_structure[0]) fid.seek(48, 1) - event = event | self.read_as_dict(fid, - event_structure[1]) + event = event | self.read_as_dict(fid, event_structure[1]) fid.seek(16, 1) - event = event | self.read_as_dict(fid, - event_structure[2]) - event['date'] = self._convert_ole_to_datetime(event['date_ole'], event['date_fraction']) - event['timestamp'] = (event['date'] - self.segments_properties[0]['date']).total_seconds() - event['guid'] = self._convert_to_guid(event['guid']) + event = event | self.read_as_dict(fid, event_structure[2]) + event["date"] = self._convert_ole_to_datetime(event["date_ole"], event["date_fraction"]) + event["timestamp"] = (event["date"] - self.segments_properties[0]["date"]).total_seconds() + event["guid"] = self._convert_to_guid(event["guid"]) try: - id_str = self.HC_EVENT[event['guid']] + id_str = self.HC_EVENT[event["guid"]] except: - id_str = 'UNKNOWN' - if id_str == 'Annotation' or id_str == 'Event Comment': + id_str = "UNKNOWN" + if id_str == "Annotation" or id_str == "Event Comment": fid.seek(31, 1) - annotation = self.read_as_list(fid, - [('annotation', 'S2', event['text_length'])]) + annotation = self.read_as_list(fid, [("annotation", "S2", event["text_length"])]) else: - annotation = '' - event['id_str'] = id_str - event['annotation'] = annotation - event['block_index'] = 0 + annotation = "" + 
event["id_str"] = id_str + event["annotation"] = annotation + event["block_index"] = 0 seg_index = 0 - segment_time_range = [segment['date'] for segment in self.segments_properties] + segment_time_range = [segment["date"] for segment in self.segments_properties] for segment_time in segment_time_range[1:]: - if segment_time < event['date']: + if segment_time < event["date"]: seg_index += 1 - event['seg_index'] = seg_index + event["seg_index"] = seg_index events.append(event) - event['type'] = '0' if event['duration'] == 0 else '1' + event["type"] = "0" if event["duration"] == 0 else "1" except: - warnings.warn(f'Not all events could not be read, only {n_events - 1} events were read', BytesWarning) + warnings.warn( + f"Not all events could not be read, only {n_events - 1} events were read", BytesWarning + ) break - offset += int(pkt['len']) + offset += int(pkt["len"]) fid.seek(offset) - pkt = self.read_as_dict(fid, - pkt_structure) - pkt['guid'] = self._convert_to_guid(pkt['guid']) + pkt = self.read_as_dict(fid, pkt_structure) + pkt["guid"] = self._convert_to_guid(pkt["guid"]) self.events = events pass - - def _convert_ole_to_datetime(self, date_ole, date_fraction = 0): - '''Date is saved as OLE with the timezone offset integrated in the file. Transform this to datetime object and add the date_fraction if provided''' - return datetime.fromtimestamp((date_ole - 25569) * 24 * 3600 + date_fraction, - tz = timezone.utc) - + + def _convert_ole_to_datetime(self, date_ole, date_fraction=0): + """Date is saved as OLE with the timezone offset integrated in the file. Transform this to datetime object and add the date_fraction if provided""" + return datetime.fromtimestamp((date_ole - 25569) * 24 * 3600 + date_fraction, tz=timezone.utc) + def _get_montage(self): - ''' + """ Read the montages - ''' + """ montages = [] - montage_instances = self._get_index_instances(id_str = 'DERIVATIONGUID') + montage_instances = self._get_index_instances(id_str="DERIVATIONGUID") with open(self.filename, "rb") as fid: montage_info_structure = [ - [('name', 'S2', 32), - ], - [('n_derivations', 'uint32'), - ('n_derivations_2', 'uint32'), - ], - [('derivation_name', 'S2', 64), - ('signal_name_1', 'S2', 32), - ('signal_name_2', 'S2', 32), - ], - ] - fid.seek(int(montage_instances[0]['offset']) + 40) - montage_info = self.read_as_dict(fid, - montage_info_structure[0]) + [ + ("name", "S2", 32), + ], + [ + ("n_derivations", "uint32"), + ("n_derivations_2", "uint32"), + ], + [ + ("derivation_name", "S2", 64), + ("signal_name_1", "S2", 32), + ("signal_name_2", "S2", 32), + ], + ] + fid.seek(int(montage_instances[0]["offset"]) + 40) + montage_info = self.read_as_dict(fid, montage_info_structure[0]) fid.seek(640, 1) - montage_info = montage_info | self.read_as_dict(fid, - montage_info_structure[1]) + montage_info = montage_info | self.read_as_dict(fid, montage_info_structure[1]) - for i in range(montage_info['n_derivations']): - montage = montage_info | self.read_as_dict(fid, - montage_info_structure[2]) + for i in range(montage_info["n_derivations"]): + montage = montage_info | self.read_as_dict(fid, montage_info_structure[2]) fid.seek(264, 1) montages.append(montage) - display_instances = self._get_index_instances(id_str = 'DISPLAYGUID') - display_structure = [[ - ('name', 'S2', 32)], - [('n_traces', 'uint32'), - ('n_traces_2', 'uint32'), - ], - [('color', 'uint32')], - ] - fid.seek(int(display_instances[0]['offset']) + 40) - display = self.read_as_dict(fid, - display_structure[0]) + display_instances = 
self._get_index_instances(id_str="DISPLAYGUID") + display_structure = [ + [("name", "S2", 32)], + [ + ("n_traces", "uint32"), + ("n_traces_2", "uint32"), + ], + [("color", "uint32")], + ] + fid.seek(int(display_instances[0]["offset"]) + 40) + display = self.read_as_dict(fid, display_structure[0]) fid.seek(640, 1) - display = display | self.read_as_dict(fid, - display_structure[1]) - if display['n_traces'] == montage_info['n_derivations']: - for i in range(display['n_traces']): + display = display | self.read_as_dict(fid, display_structure[1]) + if display["n_traces"] == montage_info["n_derivations"]: + for i in range(display["n_traces"]): fid.seek(32, 1) - montages[i]['disp_name'] = display['name'] - montages[i]['color'] = self.read_as_list(fid, - display_structure[2]) + montages[i]["disp_name"] = display["name"] + montages[i]["color"] = self.read_as_list(fid, display_structure[2]) else: - print('Could not match montage derivations with display color table') + print("Could not match montage derivations with display color table") self.montages = montages self.display = display - - def get_nr_samples(self, block_index = 0, seg_index = 0, stream_index = 0): - ''' + + def get_nr_samples(self, block_index=0, seg_index=0, stream_index=0): + """ Get the number of samples for a given signal stream in a given segment - ''' + """ try: - duration = self.segments_properties[seg_index]['duration'].total_seconds() - return([int(sampling_rate * duration) for sampling_rate in self.segments_properties[seg_index]['sampling_rates'] if self.signal_streams[sampling_rate] == stream_index]) + duration = self.segments_properties[seg_index]["duration"].total_seconds() + return [ + int(sampling_rate * duration) + for sampling_rate in self.segments_properties[seg_index]["sampling_rates"] + if self.signal_streams[sampling_rate] == stream_index + ] except IndexError as error: - print(str(error) + ': Incorrect segment argument; seg_index must be an integer representing segment index, starting from 0.') + print( + str(error) + + ": Incorrect segment argument; seg_index must be an integer representing segment index, starting from 0." 
+ ) pass - + def _get_raw_signal(self): - ''' + """ Create a memmap of the raw signal - ''' - earliest_signal_index = [tag['tag'] for tag in self.tags].index('0') - offset = [index['offset'] for index in self.main_index if index['section_idx'] == earliest_signal_index][0] - - raw_signal = np.memmap(self.filename, dtype="i2", offset = offset, mode="r") + """ + earliest_signal_index = [tag["tag"] for tag in self.tags].index("0") + offset = [index["offset"] for index in self.main_index if index["section_idx"] == earliest_signal_index][0] + + raw_signal = np.memmap(self.filename, dtype="i2", offset=offset, mode="r") self.signal_data_offset = offset self.raw_signal = raw_signal - + def _get_buffer_descriptions(self): - ''' + """ Get the descriptions of raw signal buffers - ''' + """ buffer_id = 0 self._buffer_descriptions = {0: {}} for seg_index, segment in enumerate(self.segments_properties): - current_samplingrate = segment['sampling_rates'][0] #Non signal-stream specific, just take the sampling rate of the first channel - skip_values = ([0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])))[seg_index] * current_samplingrate - [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == '0'] + current_samplingrate = segment["sampling_rates"][ + 0 + ] # Non signal-stream specific, just take the sampling rate of the first channel + skip_values = ( + [0] + list(np.cumsum([(segment["duration"].total_seconds()) for segment in self.segments_properties])) + )[seg_index] * current_samplingrate + [tag_idx] = [tag["index"] for tag in self.tags if tag["tag"] == "0"] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] - section_lengths = [0] + list(np.cumsum([int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections])) + section_lengths = [0] + list( + np.cumsum([int(index["section_l"] / 2) for j, index in enumerate(self.main_index) if j in all_sections]) + ) first_section_for_seg = self._get_relevant_section(section_lengths, skip_values) - 1 - offset = self.main_index[all_sections[first_section_for_seg]]['offset'] - shape = (max(self.get_nr_samples(seg_index = seg_index)), - segment['sampling_rates'].count(segment['sampling_rates'][0])) + offset = self.main_index[all_sections[first_section_for_seg]]["offset"] + shape = ( + max(self.get_nr_samples(seg_index=seg_index)), + segment["sampling_rates"].count(segment["sampling_rates"][0]), + ) self._buffer_descriptions[0][seg_index] = {} self._buffer_descriptions[0][seg_index][buffer_id] = { - "type": "raw", - "file_path": str(self.filename), - "dtype": 'i2', - "order": "C", - "file_offset": offset, - "shape": shape, - } - + "type": "raw", + "file_path": str(self.filename), + "dtype": "i2", + "order": "C", + "file_offset": offset, + "shape": shape, + } + def _extract_header_information(self): - ''' + """ Create header information by reading file metadata - ''' - self._get_tags() - self._get_qi() + """ + self._get_tags() + self._get_qi() self._get_main_index() self._read_dynamic_packets() self._get_dynamic_packets_data() @@ -738,350 +742,386 @@ def _extract_header_information(self): self._get_signal_properties() self._get_channel_info() self._get_ts_properties() - + def _create_signal_channels(self, dtype): - ''' + """ Create information for signal channels based on timestream and signal_properties - ''' + """ signal_channels = [] signal_streams = {} stream_id = 0 for i, channel in enumerate(self.channel_properties): - signal = next((item for 
item in self.signal_properties if item['name'] == channel['sensor']), None) - timestream = next((item for item in self.ts_properties if item['label'] == channel['sensor']), None) + signal = next((item for item in self.signal_properties if item["name"] == channel["sensor"]), None) + timestream = next((item for item in self.ts_properties if item["label"] == channel["sensor"]), None) if signal is None: continue - if channel['sampling_rate'] not in signal_streams.keys(): - signal_streams[channel['sampling_rate']] = stream_id + if channel["sampling_rate"] not in signal_streams.keys(): + signal_streams[channel["sampling_rate"]] = stream_id stream_id += 1 - channel['sampling_rate'] - signal_channels.append(( - channel['sensor'], - i, - int(channel['sampling_rate']), - 'int16', - signal['transducer'], - timestream['resolution'], - timestream['eeg_offset'], - signal_streams[channel['sampling_rate']], - '0')) + channel["sampling_rate"] + signal_channels.append( + ( + channel["sensor"], + i, + int(channel["sampling_rate"]), + "int16", + signal["transducer"], + timestream["resolution"], + timestream["eeg_offset"], + signal_streams[channel["sampling_rate"]], + "0", + ) + ) self.signal_streams = signal_streams - return np.array(signal_channels, dtype = dtype) - + return np.array(signal_channels, dtype=dtype) + def _generate_additional_annotations(self): - ''' + """ Add file metadata to all blocks and segments - ''' - for block_index in range(self.header['nb_block']): + """ + for block_index in range(self.header["nb_block"]): bl_annotations = self.raw_annotations["blocks"][block_index] - bl_annotations['date'] = self.segments_properties[0]['date'] + bl_annotations["date"] = self.segments_properties[0]["date"] try: - bl_annotations['firstname'] = self.patient_info['firstName'] - bl_annotations['surname'] = self.patient_info['lastName'] + bl_annotations["firstname"] = self.patient_info["firstName"] + bl_annotations["surname"] = self.patient_info["lastName"] except KeyError: - bl_annotations['name'] = self.patient_info['altID'] - bl_annotations['duration'] = sum([properties['duration'].total_seconds() for properties in self.segments_properties]) - for i, seg_annotations in enumerate(bl_annotations['segments']): + bl_annotations["name"] = self.patient_info["altID"] + bl_annotations["duration"] = sum( + [properties["duration"].total_seconds() for properties in self.segments_properties] + ) + for i, seg_annotations in enumerate(bl_annotations["segments"]): try: - seg_annotations['firstname'] = self.patient_info['firstName'] - seg_annotations['surname'] = self.patient_info['lastName'] + seg_annotations["firstname"] = self.patient_info["firstName"] + seg_annotations["surname"] = self.patient_info["lastName"] except KeyError: - seg_annotations['name'] = self.patient_info['altID'] - seg_annotations['date'] = self.segments_properties[i]['date'] - seg_annotations['duration'] = self.segments_properties[i]['duration'].total_seconds() - - def _get_analogsignal_chunk(self, - block_index: int = 0, - seg_index: int = 0, - i_start: int = None, - i_stop: int = None, - stream_index: int = None, - channel_indexes: np.ndarray | list | slice = None): - ''' + seg_annotations["name"] = self.patient_info["altID"] + seg_annotations["date"] = self.segments_properties[i]["date"] + seg_annotations["duration"] = self.segments_properties[i]["duration"].total_seconds() + + def _get_analogsignal_chunk( + self, + block_index: int = 0, + seg_index: int = 0, + i_start: int = None, + i_stop: int = None, + stream_index: int = None, + 
channel_indexes: np.ndarray | list | slice = None, + ): + """ Read a chunk of signal from the memmap - ''' - if block_index >= self.header['nb_block']: + """ + if block_index >= self.header["nb_block"]: raise IndexError(f"Block Index out of range. There are {self.header['nb_block']} blocks in the file") - - if seg_index >= self.header['nb_segment'][block_index]: - raise IndexError(f"Segment Index out of range. There are {self.header['nb_segment'][block_index]} segments for block {block_index}") - + + if seg_index >= self.header["nb_segment"][block_index]: + raise IndexError( + f"Segment Index out of range. There are {self.header['nb_segment'][block_index]} segments for block {block_index}" + ) + if channel_indexes is None: - channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] + channel_indexes = [ + i + for i, channel in enumerate(self.header["signal_channels"]) + if channel["stream_id"] == str(stream_index) + ] elif isinstance(channel_indexes, slice): if channel_indexes == slice(None): - channel_indexes = [i for i, channel in enumerate(self.header['signal_channels']) if channel['stream_id'] == str(stream_index)] - channel_indexes = np.arange(self.header['signal_channels'].shape[0], dtype="int")[channel_indexes] + channel_indexes = [ + i + for i, channel in enumerate(self.header["signal_channels"]) + if channel["stream_id"] == str(stream_index) + ] + channel_indexes = np.arange(self.header["signal_channels"].shape[0], dtype="int")[channel_indexes] else: channel_indexes = np.asarray(channel_indexes) if any(channel_indexes < 0): raise IndexError("Channel Indices cannot be negative") - if any(channel_indexes >= self.header['signal_channels'].shape[0]): + if any(channel_indexes >= self.header["signal_channels"].shape[0]): raise IndexError("Channel Indices out of range") - + if i_start is None: i_start = 0 - + if i_stop is None: - i_stop = max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)) - - if i_start < 0 or i_stop > max(self.get_nr_samples(seg_index = seg_index, stream_index = stream_index)): #Get the maximum number of samples for the respective sampling rate + i_stop = max(self.get_nr_samples(seg_index=seg_index, stream_index=stream_index)) + + if i_start < 0 or i_stop > max( + self.get_nr_samples(seg_index=seg_index, stream_index=stream_index) + ): # Get the maximum number of samples for the respective sampling rate raise IndexError("Start or Stop Index out of bounds") - - cum_segment_duration = [0] + list(np.cumsum([(segment['duration'].total_seconds()) for segment in self.segments_properties])) + + cum_segment_duration = [0] + list( + np.cumsum([(segment["duration"].total_seconds()) for segment in self.segments_properties]) + ) data = np.empty([i_stop - i_start, len(channel_indexes)]) for i in range(len(channel_indexes)): - current_samplingrate = self.segments_properties[seg_index]['sampling_rates'][i] - multiplicator = self.segments_properties[seg_index]['scale'][i] - [tag_idx] = [tag['index'] for tag in self.tags if tag['tag'] == str(i)] + current_samplingrate = self.segments_properties[seg_index]["sampling_rates"][i] + multiplicator = self.segments_properties[seg_index]["scale"][i] + [tag_idx] = [tag["index"] for tag in self.tags if tag["tag"] == str(i)] all_sections = [j for j, idx_id in enumerate(self.all_section_ids) if idx_id == tag_idx] - section_lengths = [int(index['section_l']/2) for j, index in enumerate(self.main_index) if j in all_sections] - cum_section_lengths = [0] + 
list(np.cumsum(section_lengths)) + section_lengths = [ + int(index["section_l"] / 2) for j, index in enumerate(self.main_index) if j in all_sections + ] + cum_section_lengths = [0] + list(np.cumsum(section_lengths)) skip_values = cum_segment_duration[seg_index] * current_samplingrate first_section_for_seg = self._get_relevant_section(cum_section_lengths, skip_values) - 1 - last_section_for_seg = self._get_relevant_section(cum_section_lengths, - current_samplingrate* - self.segments_properties[seg_index]['duration'].total_seconds()) - 1 + first_section_for_seg + last_section_for_seg = ( + self._get_relevant_section( + cum_section_lengths, + current_samplingrate * self.segments_properties[seg_index]["duration"].total_seconds(), + ) + - 1 + + first_section_for_seg + ) use_sections = all_sections[first_section_for_seg:last_section_for_seg] use_sections_length = section_lengths[first_section_for_seg:last_section_for_seg] np_idx = 0 for j, (section_idx, section_length) in enumerate(zip(use_sections, use_sections_length)): cur_sec = self.main_index[section_idx] - start = int((cur_sec['offset'] - self.signal_data_offset)/2) - if (i_start > start): + start = int((cur_sec["offset"] - self.signal_data_offset) / 2) + if i_start > start: start = i_start - if (i_stop - i_start) < (section_length*(j+1)): - stop = start + (i_stop - i_start - section_length*j) + if (i_stop - i_start) < (section_length * (j + 1)): + stop = start + (i_stop - i_start - section_length * j) else: - stop = start + section_length - data[np_idx:(np_idx + section_length), i] = multiplicator*self.raw_signal[slice(start, stop)] + stop = start + section_length + data[np_idx : (np_idx + section_length), i] = multiplicator * self.raw_signal[slice(start, stop)] np_idx += section_length return data - + def _segment_t_start(self, block_index: int, seg_index: int): - ''' + """ Get start time for a given segment - ''' + """ all_starts = [] - for block_index in range(self.header['nb_block']): + for block_index in range(self.header["nb_block"]): bl_annotation = self.raw_annotations["blocks"][block_index] block_starts = [0] startime = 0 - for seg_annotation in (bl_annotation['segments'][1:]): - startime += seg_annotation['duration'] + for seg_annotation in bl_annotation["segments"][1:]: + startime += seg_annotation["duration"] block_starts.append(float(startime)) all_starts.append(block_starts) return all_starts[block_index][seg_index] - + def _segment_t_stop(self, block_index: int, seg_index: int): - ''' + """ Get stop time for a given segment - ''' + """ all_stops = [] - for block_index in range(self.header['nb_block']): + for block_index in range(self.header["nb_block"]): bl_annotation = self.raw_annotations["blocks"][block_index] block_stops = [] stoptime = 0 - for seg_annotation in (bl_annotation['segments']): - stoptime += seg_annotation['duration'] + for seg_annotation in bl_annotation["segments"]: + stoptime += seg_annotation["duration"] block_stops.append(float(stoptime)) - all_stops.append(block_stops) + all_stops.append(block_stops) return all_stops[block_index][seg_index] - + def _get_signal_size(self, block_index: int = 0, seg_index: int = 0, stream_index: int = 0): - ''' + """ Get the maximum number of samples in a channel for a given stream and given segment - ''' - return max(self.get_nr_samples(block_index = block_index, - seg_index = seg_index, - stream_index = stream_index)) - + """ + return max(self.get_nr_samples(block_index=block_index, seg_index=seg_index, stream_index=stream_index)) + def _get_signal_t_start(self, 
block_index: int = 0, seg_index: int = 0, stream_index: int = 0): return self._segment_t_start(block_index, seg_index) - + def _spike_count(self, block_index: int, seg_index: int, spike_channel_index: int): return 0 - + def _get_spike_timestamps( self, block_index: int, seg_index: int, spike_channel_index: int, t_start: float | None, t_stop: float | None ): return None - + def _rescale_spike_timestamp(self, spike_timestamps: np.ndarray, dtype: np.dtype): return None - + def _event_count(self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0): - ''' + """ Get the number of events for a given segment and event channel - ''' - return len([event for event in self.events if (event['block_index'] == block_index - and event['seg_index'] == seg_index - and event['type'] == str(event_channel_index))]) - + """ + return len( + [ + event + for event in self.events + if ( + event["block_index"] == block_index + and event["seg_index"] == seg_index + and event["type"] == str(event_channel_index) + ) + ] + ) + def _get_event_timestamps( - self, block_index: int = 0, seg_index: int = 0, event_channel_index: int = 0, t_start: float = None, t_stop: float = None + self, + block_index: int = 0, + seg_index: int = 0, + event_channel_index: int = 0, + t_start: float = None, + t_stop: float = None, ): - ''' + """ Get timestamps of all events for a given segment and event channel. Optionally, provide a time range - ''' - events = [event for event in self.events if event['type'] == str(event_channel_index) and event['seg_index'] == seg_index] - timestamp = np.array([event['timestamp'] for event in events], dtype="float64") - durations = np.array([event['duration'] for event in events], dtype="float64") - labels = np.array([event['id_str'] for event in events], dtype="U12") + """ + events = [ + event + for event in self.events + if event["type"] == str(event_channel_index) and event["seg_index"] == seg_index + ] + timestamp = np.array([event["timestamp"] for event in events], dtype="float64") + durations = np.array([event["duration"] for event in events], dtype="float64") + labels = np.array([event["id_str"] for event in events], dtype="U12") if t_start is not None: keep = timestamp >= t_start timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep] if t_stop is not None: keep = timestamp <= t_stop timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep] - if seg_index == '0': + if seg_index == "0": durations = None return timestamp, durations, labels def _rescale_event_timestamp(self, event_timestamps: np.ndarray, dtype: np.dtype, event_channel_index: int): event_times = event_timestamps.astype(dtype) - return event_times - + return event_times + def _rescale_epoch_duration(self, raw_duration: np.ndarray, dtype: np.dtype, event_channel_index: int): durations = raw_duration.astype(dtype) return durations - - def _get_index_instances(self, id_str = '', tag = ''): - ''' + + def _get_index_instances(self, id_str="", tag=""): + """ Return the main index information for an id_string or a tag - ''' - identifier = 'id_str' + """ + identifier = "id_str" if tag: - identifier = 'tag' + identifier = "tag" id_str = tag try: info_idx = [entry[identifier] for entry in self.tags].index(id_str) - matching_idx = [entry['section_idx'] == info_idx for entry in self.main_index] + matching_idx = [entry["section_idx"] == info_idx for entry in self.main_index] idx_instance = [entry for entry, match in zip(self.main_index, matching_idx) if match] except: warnings.warn(f'No 
entries for instance "{id_str}" found', BytesWarning)
-            idx_instance = [{
-                'section_idx': 0,
-                'offset' : 0,
-                'block_l': 0,
-                'section_l': 0
-                }]
-        return(idx_instance)
-
+            idx_instance = [{"section_idx": 0, "offset": 0, "block_l": 0, "section_l": 0}]
+        return idx_instance
+
     def _get_analogsignal_buffer_description(self, block_index, seg_index, buffer_id):
-        '''
+        """
         Get the description of a signal buffer for a given segment
-        '''
+        """
         return self._buffer_descriptions[block_index][seg_index][buffer_id]

     def read_as_dict(self, fid, dtype):
-        '''
+        """
         Read bytes from the given binary file and return the results as a dictionary
-        '''
-        info = dict()
+        """
+        info = dict()
         dt = np.dtype(dtype)
         h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
         for k in dt.names:
             v = h[k]
             v = self._process_bytes(v, dt[k])
-            info[k] = v
+            info[k] = v
         return info

     def read_as_list(self, fid, dtype):
-        '''
+        """
         Read bytes from the given binary file and return the results as a list
-        '''
+        """
         dt = np.dtype(dtype)
         if dt.itemsize == 0:
             return []
         h = np.frombuffer(fid.read(dt.itemsize), dt)[0][0]
         h = self._process_bytes(h, dt[0])
         return h
-
+
     def _process_bytes(self, byte_data, data_type):
-        '''
+        """
         Concatenate a list of byte data into a single string and decode string data
-        '''
-        is_list_of_binaries = (type(byte_data) == np.ndarray and type(byte_data[0]) == np.bytes_)
-        byte_obj = b''.join(byte_data) if is_list_of_binaries else byte_data
+        """
+        is_list_of_binaries = type(byte_data) == np.ndarray and type(byte_data[0]) == np.bytes_
+        byte_obj = b"".join(byte_data) if is_list_of_binaries else byte_data
         bytes_decoded = self._decode_string(byte_obj) if data_type.kind == "S" or is_list_of_binaries else byte_obj
         return bytes_decoded

     def _decode_string(self, string):
-        '''
+        """
         Decode string data
-        '''
+        """
         try:
             string = string.decode("utf8")
         except:
-            string = string.decode('latin_1')
+            string = string.decode("latin_1")
         string = string.replace("\x03", "")
         string = string.replace("\x00", "")
         return string

-    def _convert_to_guid(self,
-                         hex_list,
-                         guid_format = '{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}'):
-        '''
+    def _convert_to_guid(self, hex_list, guid_format="{3}{2}{1}{0}-{5}{4}-{7}{6}-{8}{9}-{10}{11}{12}{13}{14}{15}"):
+        """
         Shuffle a list of hexadecimal numbers into the given guid_format
-        '''
-        dec_list = [f'{nr:x}'.upper().rjust(2, '0') for nr in hex_list]
-        return('{' + guid_format.format(*dec_list) + '}')
+        """
+        dec_list = [f"{nr:x}".upper().rjust(2, "0") for nr in hex_list]
+        return "{" + guid_format.format(*dec_list) + "}"

-    def _convert_to_date(self, data_float, origin = '30-12-1899'):
-        '''
+    def _convert_to_date(self, data_float, origin="30-12-1899"):
+        """
         Convert an OLE float to datetime. The origin is set 1 day back to account for OLE considering 1900 as a leap year
-        '''
-        return(datetime.strptime(origin, '%d-%m-%Y')
-               + timedelta(seconds = int(data_float*24*60*60)))
+        """
+        return datetime.strptime(origin, "%d-%m-%Y") + timedelta(seconds=int(data_float * 24 * 60 * 60))

-    def _typecast(self, data, dtype_in = np.uint8, dtype_out = np.uint32):
-        '''
+    def _typecast(self, data, dtype_in=np.uint8, dtype_out=np.uint32):
+        """
         Reinterpret the raw bytes of the given data as a different datatype
-        '''
-        data = np.array(data, dtype = dtype_in)
-        return(data.view(dtype_out))
+        """
+        data = np.array(data, dtype=dtype_in)
+        return data.view(dtype_out)
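# An illustrative sketch of the mixed-endian GUID layout that _convert_to_guid
# above handles: the first three fields of the 16 raw bytes are little-endian,
# the remainder big-endian, which is exactly what the
# "{3}{2}{1}{0}-{5}{4}-{7}{6}-..." format string expresses. Python's uuid
# module performs the same reordering via bytes_le (the raw bytes here are
# made up):
import uuid

raw = bytes(range(16))
guid = "{" + str(uuid.UUID(bytes_le=raw)).upper() + "}"
assert guid == "{03020100-0504-0706-0809-0A0B0C0D0E0F}"

    def _transform_ts_properties(self, data, dtype):
-        '''
+        """
        For some timestream properties, if the list contains 1 floating-point number return it as a value.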
Else, paste all entries into a single string
-        '''
-        cast_list = list(self._typecast(data, dtype_out = dtype))
+        """
+        cast_list = list(self._typecast(data, dtype_out=dtype))
         if dtype == np.float64:
             [cast_list] = cast_list
-            return(cast_list)
+            return cast_list
         else:
-            return(self._transform_char(cast_list))
-
+            return self._transform_char(cast_list)
+
     def _transform_char(self, line):
-        '''
+        """
         Paste all entries of a given list together
-        '''
-        if type(line) != list: line = [line]
-        line = ''.join([chr(item) for item in line if chr(item) != '\x00'])
+        """
+        if type(line) != list:
+            line = [line]
+        line = "".join([chr(item) for item in line if chr(item) != "\x00"])
         return line

     def _read_ts_properties(self, data, offset, internal_offset, dtype):
-        '''
+        """
         Read timestream properties from some data, given an offset, and process the timestream properties
-        '''
+        """
         offset_modifier = 8 if (dtype == np.float64) else 2
         top_range = offset + internal_offset + offset_modifier
-        value = self._transform_ts_properties(data[(offset + internal_offset):top_range], dtype)
+        value = self._transform_ts_properties(data[(offset + internal_offset) : top_range], dtype)
         internal_offset += offset_modifier
-        return(value, internal_offset)
+        return (value, internal_offset)

     def _get_relevant_section(self, lengths_list, to_compare):
-        '''
+        """
         Get the section that contains the given sampling point
-        '''
+        """
         try:
             segment = min([j for j, length in enumerate(lengths_list) if length > to_compare])
         except ValueError:
             segment = len(lengths_list)
-        return(segment)
+        return segment

     def _ensure_list(self, output):
         """
@@ -1090,4 +1130,4 @@ def _ensure_list(self, output):
         """
         if not isinstance(output, list):
             return [output]
-        return output
\ No newline at end of file
+        return output
diff --git a/neo/test/iotest/test_nicoletio.py b/neo/test/iotest/test_nicoletio.py
index 4f24e2cc3..4c88f5a2c 100644
--- a/neo/test/iotest/test_nicoletio.py
+++ b/neo/test/iotest/test_nicoletio.py
@@ -35,11 +35,13 @@ def tearDown(self) -> None:
         super().tearDown()
         for entity in self.entities_to_test:
             full_path = get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
-            #pathlib.Path(full_path).unlink(missing_ok=True)
+            pathlib.Path(full_path).unlink(missing_ok=True)

     def test_read_segment_lazy(self):
         for entity in self.entities_to_test:
-            r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir))
+            r = NicoletIO(
+                filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
+            )
             seg = r.read_segment(lazy=True)
             for ana in seg.analogsignals:
                 assert isinstance(ana, AnalogSignalProxy)
@@ -61,12 +63,16 @@ def test_read_segment_lazy(self):

     def test_read_block(self):
         for entity in self.entities_to_test:
-            r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir))
+            r = NicoletIO(
+                filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
+            )
             bl = r.read_block(lazy=True)

     def test_read_segment_with_time_slice(self):
         for entity in self.entities_to_test:
-            r = NicoletIO(filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir))
+            r = NicoletIO(
+                filename=get_test_file_full_path(self.ioclass, filename=entity, directory=self.local_test_dir)
+            )
             seg = r.read_segment(time_slice=None)
             shape_full = seg.analogsignals[0].shape
             event_full = seg.events[0]
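# An illustrative bisect-based equivalent of _get_relevant_section defined
# earlier: both return the index of the first cumulative section length
# strictly greater than the requested sample offset (the list values here are
# hypothetical).
import bisect

cum_lengths = [0, 500, 1200, 2000]
to_compare = 700
linear = min(j for j, length in enumerate(cum_lengths) if length > to_compare)
assert linear == bisect.bisect_right(cum_lengths, to_compare) == 2
diff --git a/neo/test/rawiotest/test_nicoletrawio.py 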
b/neo/test/rawiotest/test_nicoletrawio.py index 77309d551..aebd002e5 100644 --- a/neo/test/rawiotest/test_nicoletrawio.py +++ b/neo/test/rawiotest/test_nicoletrawio.py @@ -10,9 +10,10 @@ class TestNicoletRawIO( unittest.TestCase, ): rawioclass = NicoletRawIO - entities_to_download = ['nicolet'] + entities_to_download = ["nicolet"] entities_to_test = ["nicolet/e_files/test.e"] - + + if __name__ == "__main__": unittest.main() From c1c04378c015053c2dbbbb8c519d43a149c61429 Mon Sep 17 00:00:00 2001 From: MureziCapaul Date: Tue, 15 Jul 2025 09:30:28 +0200 Subject: [PATCH 63/63] Cleanup unused variables --- neo/rawio/nicoletrawio.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/neo/rawio/nicoletrawio.py b/neo/rawio/nicoletrawio.py index 9753ae9fd..fc37f1218 100644 --- a/neo/rawio/nicoletrawio.py +++ b/neo/rawio/nicoletrawio.py @@ -387,13 +387,11 @@ def _get_signal_properties(self): with open(self.filename, "rb") as fid: fid.seek(instance["offset"]) signal_structure = self.read_as_dict(fid, signal_structure_segment) - unknown = self.read_as_list(fid, [("unknown", "S1", 152)]) - fid.seek(512, 1) + fid.seek(664, 1) n_idx = self.read_as_dict(fid, [("n_idx", "uint16"), ("misc1", "uint16", 3)]) for i in range(n_idx["n_idx"]): properties = self.read_as_dict(fid, signal_properties_segment) signal_properties.append(properties) - reserved = self.read_as_list(fid, [("reserved", "S1", 256)]) self.signal_structure = signal_structure self.signal_properties = signal_properties pass @@ -595,6 +593,7 @@ def _get_events(self): if segment_time < event["date"]: seg_index += 1 event["seg_index"] = seg_index + event["block_index"] = 0 events.append(event) event["type"] = "0" if event["duration"] == 0 else "1" except: @@ -745,7 +744,7 @@ def _extract_header_information(self): def _create_signal_channels(self, dtype): """ - Create information for signal channels based on timestream and signal_properties + Create information for signal channels based on channel properties, timestream and signal_properties """ signal_channels = [] signal_streams = {} @@ -848,7 +847,7 @@ def _get_analogsignal_chunk( if i_start < 0 or i_stop > max( self.get_nr_samples(seg_index=seg_index, stream_index=stream_index) - ): # Get the maximum number of samples for the respective sampling rate + ): raise IndexError("Start or Stop Index out of bounds") cum_segment_duration = [0] + list( @@ -970,7 +969,9 @@ def _get_event_timestamps( events = [ event for event in self.events - if event["type"] == str(event_channel_index) and event["seg_index"] == seg_index + if event["type"] == str(event_channel_index) + and event["seg_index"] == seg_index + and event["block_index"] == block_index ] timestamp = np.array([event["timestamp"] for event in events], dtype="float64") durations = np.array([event["duration"] for event in events], dtype="float64")
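# An illustrative sketch of the timestamp filtering pattern used by
# _get_event_timestamps in the hunk above: build parallel numpy arrays for
# timestamps, durations and labels, then apply one boolean mask to all of
# them (the event values below are made up).
import numpy as np

timestamp = np.array([0.5, 2.0, 7.5], dtype="float64")
durations = np.array([0.0, 1.0, 0.0], dtype="float64")
labels = np.array(["Annotation", "Seizure", "Photic"], dtype="U12")
keep = (timestamp >= 1.0) & (timestamp <= 8.0)
timestamp, durations, labels = timestamp[keep], durations[keep], labels[keep]
assert list(labels) == ["Seizure", "Photic"]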