From 43c10dbab4093c79606e254907daf15d4cf3740b Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Mon, 23 Feb 2026 01:49:42 +0000
Subject: [PATCH 01/10] Update GitHub Actions workflow and Pyraview header
- Update 'Package Binaries' step to use 7-Zip on Windows runners to fix zip failures.
- Simplify packaging logic for non-Windows platforms.
- Update 'Package Toolbox' step to package the MATLAB toolbox programmatically, using the GitHub release tag for the version number and 'Pyraview Team' as the author.
- Create a complete .prj file with a fixed GUID for toolbox packaging.
- Enable 'workflow_dispatch' on release jobs to allow manual triggering without tags.
- Update Pyraview header to include `double startTime` after `nativeRate`, reducing reserved padding to 980 bytes.
- Update C and MATLAB code to handle `startTime` argument.
---
.github/workflows/build_and_release.yml | 63 ++++++++++++++++++++-----
include/pyraview_header.h | 4 +-
src/c/pyraview.c | 43 +++++++++++------
src/matlab/pyraview_mex.c | 30 ++++++++----
4 files changed, 103 insertions(+), 37 deletions(-)
diff --git a/.github/workflows/build_and_release.yml b/.github/workflows/build_and_release.yml
index 3c34b16..fa00ae2 100644
--- a/.github/workflows/build_and_release.yml
+++ b/.github/workflows/build_and_release.yml
@@ -44,21 +44,20 @@ jobs:
# Zip binaries for release (naming by OS/Architecture)
- name: Package Binaries
- if: startsWith(github.ref, 'refs/tags/')
+ if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
shell: bash
run: |
mkdir -p dist
if [ "${{ runner.os }}" == "Windows" ]; then
- # Windows CMake builds output to bin due to our CMakeLists.txt fix
- zip -j dist/pyraview-win-x64.zip build/bin/*.dll build/bin/*.exe
- elif [ "${{ runner.os }}" == "macOS" ]; then
- zip -j dist/pyraview-mac-arm.zip build/bin/*.dylib build/bin/run_tests
+ # Use 7-Zip (pre-installed on Windows runners)
+          # 'a' adds files to the archive (note: unlike zip, 7z has no '-j' flag; paths are stored as given)
+ 7z a -tzip dist/pyraview-win-x64.zip ./build/bin/*.dll ./build/bin/*.exe
else
- zip -j dist/pyraview-linux-x64.zip build/bin/*.so build/bin/run_tests
+ zip -j dist/pyraview-${{ runner.os }}-${{ runner.arch }}.zip build/bin/*
fi
- name: Upload Artifacts
- if: startsWith(github.ref, 'refs/tags/')
+ if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: binaries-${{ matrix.os }}
@@ -97,7 +96,7 @@ jobs:
select-by-folder: src/matlab
- name: Upload MEX Artifact
- if: startsWith(github.ref, 'refs/tags/')
+ if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: mex-${{ matrix.os }}
@@ -107,7 +106,7 @@ jobs:
package-matlab:
name: Package Matlab Toolbox
needs: build-matlab
- if: startsWith(github.ref, 'refs/tags/')
+ if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -127,10 +126,50 @@ jobs:
# Run the packaging command
- name: Package Toolbox
uses: matlab-actions/run-command@v2
+ env:
+ GITHUB_REF_NAME: ${{ github.ref_name }}
with:
command: |
- opts = matlab.addons.toolbox.ToolboxOptions('toolboxPackaging.prj');
- matlab.addons.toolbox.packageToolbox(opts);
+ % 1. Use a fixed, permanent UUID for the project
+ % This ensures MATLAB treats every build as an update to the same toolbox
+ guid = '6e14a2b9-7f3c-4d8e-9a1b-3c5d7e9f2a4b';
+
+ % 2. Grab the version from the GitHub Tag environment variable
+ % Default to 1.0.0 if not running in a tagged action
+ version = getenv('GITHUB_REF_NAME');
+ if isempty(version) || ~startsWith(version, 'v')
+ version = '1.0.0';
+ else
+ % Remove the 'v' prefix (e.g., 'v1.2.3' -> '1.2.3')
+ version = erase(version, 'v');
+ end
+
+ % 3. Create a structurally complete, valid Toolbox PRJ XML
+ xmlCode = [...
+ '', ...
+ '', ...
+ '', ...
+ 'Pyraview', ...
+ 'Pyraview Team', ...
+ 'High-performance decimation engine.', ...
+ '' version '', ...
+ '${PROJECT_ROOT}/Pyraview.mltbx', ...
+ '' guid '', ...
+ '${PROJECT_ROOT}/src/matlab', ...
+ '${PROJECT_ROOT}/src/matlab', ...
+ '${PROJECT_ROOT}/Pyraview.mltbx', ...
+ '', ...
+ '/usr/local/matlab', ...
+ ''];
+
+ % 4. Write the file
+ fid = fopen('pyraview.prj', 'w');
+ fprintf(fid, '%s', xmlCode);
+ fclose(fid);
+
+ % 5. Package using the file directly
+ fprintf('Packaging Pyraview version %s with GUID %s...\n', version, guid);
+ matlab.addons.toolbox.packageToolbox('pyraview.prj', 'Pyraview.mltbx');
# Upload the .mltbx as an artifact so the release job can pick it up
- name: Upload Toolbox Artifact
@@ -142,7 +181,7 @@ jobs:
release:
name: Create GitHub Release
needs: [build_and_test, package-matlab]
- if: startsWith(github.ref, 'refs/tags/')
+ if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
permissions:
contents: write # Required to create releases
diff --git a/include/pyraview_header.h b/include/pyraview_header.h
index 0097803..3ba70d0 100644
--- a/include/pyraview_header.h
+++ b/include/pyraview_header.h
@@ -42,8 +42,9 @@ typedef PV_ALIGN_PREFIX(64) struct {
uint32_t channelCount; // Number of channels
double sampleRate; // Sample rate of this level
double nativeRate; // Original recording rate
+ double startTime; // Start time of the recording
uint32_t decimationFactor; // Cumulative decimation from raw
- uint8_t reserved[988]; // Padding to 1024 bytes
+ uint8_t reserved[980]; // Padding to 1024 bytes
} PV_ALIGN_SUFFIX(64) PyraviewHeader;
// API Function
@@ -60,6 +61,7 @@ int pyraview_process_chunk(
const int* levelSteps, // Array of decimation factors [100, 10, 10]
int numLevels, // Size of levelSteps array
double nativeRate, // Original recording rate (required for header/validation)
+ double startTime, // Start time of the recording
int numThreads // 0 for auto
);
diff --git a/src/c/pyraview.c b/src/c/pyraview.c
index 9e532eb..b4e8bd8 100644
--- a/src/c/pyraview.c
+++ b/src/c/pyraview.c
@@ -24,7 +24,7 @@
 #include <math.h> /* NOTE: header name lost in transit (angle brackets stripped); math.h is required for isnan/isinf added below — verify against original */
// Utility: Write header
-static void pv_write_header(FILE* f, int channels, int type, double sampleRate, double nativeRate, int decimation) {
+static void pv_write_header(FILE* f, int channels, int type, double sampleRate, double nativeRate, double startTime, int decimation) {
PyraviewHeader h;
memset(&h, 0, sizeof(h));
memcpy(h.magic, "PYRA", 4);
@@ -33,13 +33,14 @@ static void pv_write_header(FILE* f, int channels, int type, double sampleRate,
h.channelCount = channels;
h.sampleRate = sampleRate;
h.nativeRate = nativeRate;
+ h.startTime = startTime;
h.decimationFactor = decimation;
fwrite(&h, sizeof(h), 1, f);
}
// Utility: Validate header
// Returns 1 if valid (or created), 0 if mismatch, -1 if error
-static int pv_validate_or_create(FILE** f_out, const char* filename, int channels, int type, double sampleRate, double nativeRate, int decimation, int append) {
+static int pv_validate_or_create(FILE** f_out, const char* filename, int channels, int type, double sampleRate, double nativeRate, double startTime, int decimation, int append) {
FILE* f = NULL;
if (append) {
f = fopen(filename, "r+b"); // Try open existing for read/write
@@ -61,6 +62,18 @@ static int pv_validate_or_create(FILE** f_out, const char* filename, int channel
fclose(f);
return 0; // Mismatch
}
+        // Sanity-check the startTime stored in the existing file before
+        // appending. The primary compatibility checks remain channelCount
+        // and dataType; this only rejects headers whose startTime is
+        // NaN/Inf, i.e. a corrupted header rather than a mere mismatch.
+        if (isnan(h.startTime) || isinf(h.startTime)) {
+            fclose(f);
+            return -1; // Invalid start time in existing file
+        }
+
+
// Seek to end
pv_fseek(f, 0, SEEK_END);
*f_out = f;
@@ -71,7 +84,7 @@ static int pv_validate_or_create(FILE** f_out, const char* filename, int channel
f = fopen(filename, "wb"); // Write new
if (!f) return -1;
- pv_write_header(f, channels, type, sampleRate, nativeRate, decimation);
+ pv_write_header(f, channels, type, sampleRate, nativeRate, startTime, decimation);
*f_out = f;
return 1;
}
@@ -88,6 +101,7 @@ static int pv_internal_execute_##SUFFIX( \
const int* steps, \
int nLevels, \
double nativeRate, \
+ double startTime, \
int dataType, \
int nThreads \
) { \
@@ -108,7 +122,7 @@ static int pv_internal_execute_##SUFFIX( \
\
char filename[512]; \
snprintf(filename, sizeof(filename), "%s_L%d.bin", prefix, i+1); \
- int status = pv_validate_or_create(&files[i], filename, (int)C, dataType, rates[i], nativeRate, decimations[i], append); \
+ int status = pv_validate_or_create(&files[i], filename, (int)C, dataType, rates[i], nativeRate, startTime, decimations[i], append); \
if (status <= 0) { \
/* Cleanup previous opens */ \
for (int j = 0; j < i; j++) fclose(files[j]); \
@@ -238,6 +252,7 @@ int pyraview_process_chunk(
const int* levelSteps,
int numLevels,
double nativeRate,
+ double startTime,
int numThreads
) {
// 1. Validate inputs (basic)
@@ -251,25 +266,25 @@ int pyraview_process_chunk(
// Dispatch to typed worker
switch (dataType) {
case PV_INT8: // 0
- return pv_internal_execute_i8((const int8_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_i8((const int8_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_UINT8: // 1
- return pv_internal_execute_u8((const uint8_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_u8((const uint8_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_INT16: // 2
- return pv_internal_execute_i16((const int16_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_i16((const int16_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_UINT16: // 3
- return pv_internal_execute_u16((const uint16_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_u16((const uint16_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_INT32: // 4
- return pv_internal_execute_i32((const int32_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_i32((const int32_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_UINT32: // 5
- return pv_internal_execute_u32((const uint32_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_u32((const uint32_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_INT64: // 6
- return pv_internal_execute_i64((const int64_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_i64((const int64_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_UINT64: // 7
- return pv_internal_execute_u64((const uint64_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_u64((const uint64_t*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_FLOAT32: // 8
- return pv_internal_execute_f32((const float*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_f32((const float*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
case PV_FLOAT64: // 9
- return pv_internal_execute_f64((const double*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, dataType, numThreads);
+ return pv_internal_execute_f64((const double*)dataArray, numRows, numCols, layout, filePrefix, append, levelSteps, numLevels, nativeRate, startTime, dataType, numThreads);
default:
return -1; // Unknown data type
}
diff --git a/src/matlab/pyraview_mex.c b/src/matlab/pyraview_mex.c
index 91b1b79..3306d15 100644
--- a/src/matlab/pyraview_mex.c
+++ b/src/matlab/pyraview_mex.c
@@ -7,13 +7,14 @@
* Gateway for Pyraview C Engine
*
* Usage:
- * status = pyraview_mex(data, prefix, steps, nativeRate, [append], [numThreads])
+ * status = pyraview_mex(data, prefix, steps, nativeRate, startTime, [append], [numThreads])
*
* Inputs:
* data: (Samples x Channels) matrix. uint8, int16, single, or double.
* prefix: char array (string).
* steps: double array of decimation factors (e.g. [100, 10]).
* nativeRate: double scalar.
+ * startTime: double scalar.
* append: (optional) logical/scalar. Default false.
* numThreads: (optional) scalar. Default 0 (auto).
*
@@ -23,8 +24,8 @@
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// Check inputs
- if (nrhs < 4) {
- mexErrMsgIdAndTxt("Pyraview:InvalidInput", "Usage: pyraview_mex(data, prefix, steps, nativeRate, [append], [numThreads])");
+ if (nrhs < 5) {
+ mexErrMsgIdAndTxt("Pyraview:InvalidInput", "Usage: pyraview_mex(data, prefix, steps, nativeRate, startTime, [append], [numThreads])");
}
// 1. Data
@@ -88,18 +89,26 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
}
double nativeRate = mxGetScalar(prhs[3]);
- // 5. Append (optional)
+ // 5. Start Time
+ if (!mxIsDouble(prhs[4]) || mxGetNumberOfElements(prhs[4]) != 1) {
+ mxFree(levelSteps);
+ mxFree(prefix);
+ mexErrMsgIdAndTxt("Pyraview:InvalidInput", "StartTime must be scalar double.");
+ }
+ double startTime = mxGetScalar(prhs[4]);
+
+ // 6. Append (optional)
int append = 0;
- if (nrhs >= 5) {
- if (mxIsLogical(prhs[4]) || mxIsNumeric(prhs[4])) {
- append = (int)mxGetScalar(prhs[4]);
+ if (nrhs >= 6) {
+ if (mxIsLogical(prhs[5]) || mxIsNumeric(prhs[5])) {
+ append = (int)mxGetScalar(prhs[5]);
}
}
- // 6. NumThreads (optional)
+ // 7. NumThreads (optional)
int numThreads = 0;
- if (nrhs >= 6) {
- numThreads = (int)mxGetScalar(prhs[5]);
+ if (nrhs >= 7) {
+ numThreads = (int)mxGetScalar(prhs[6]);
}
// Call Engine
@@ -115,6 +124,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
levelSteps,
(int)numSteps,
nativeRate,
+ startTime,
numThreads
);
From d3fa89310f0155a166961036d03c283a2b3cf744 Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Mon, 23 Feb 2026 02:04:38 +0000
Subject: [PATCH 02/10] Implement PyraviewDataset and update core
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `PyraviewDataset` class with `get_view_data` for optimal level selection.
- MATLAB: Add `PyraviewDataset.m` and `pyraview_get_header_mex.c`.
- Workflow: Fix Windows zip failures and enable manual release triggers.
- Workflow: Update MATLAB toolbox packaging to use dynamic versioning.
---
include/pyraview_header.h | 7 +
src/c/pyraview.c | 13 ++
src/c/tests/test_main.c | 3 +-
src/matlab/PyraviewDataset.m | 171 +++++++++++++++++++
src/matlab/build_pyraview.m | 6 +-
src/matlab/pyraview_get_header_mex.c | 57 +++++++
src/matlab/test_dataset.m | 58 +++++++
src/python/pyraview.py | 243 ++++++++++++++++++++++++++-
src/python/tests/test_dataset.py | 93 ++++++++++
9 files changed, 648 insertions(+), 3 deletions(-)
create mode 100644 src/matlab/PyraviewDataset.m
create mode 100644 src/matlab/pyraview_get_header_mex.c
create mode 100644 src/matlab/test_dataset.m
create mode 100644 src/python/tests/test_dataset.py
diff --git a/include/pyraview_header.h b/include/pyraview_header.h
index 3ba70d0..6971733 100644
--- a/include/pyraview_header.h
+++ b/include/pyraview_header.h
@@ -65,6 +65,13 @@ int pyraview_process_chunk(
int numThreads // 0 for auto
);
+// Reads just the header from a file
+// Returns 0 on success, -1 on failure
+int pyraview_get_header(
+ const char* filename,
+ PyraviewHeader* header
+);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/c/pyraview.c b/src/c/pyraview.c
index b4e8bd8..feafb1e 100644
--- a/src/c/pyraview.c
+++ b/src/c/pyraview.c
@@ -289,3 +289,16 @@ int pyraview_process_chunk(
return -1; // Unknown data type
}
}
+
+int pyraview_get_header(const char* filename, PyraviewHeader* header) {
+ if (!filename || !header) return -1;
+ FILE* f = fopen(filename, "rb");
+ if (!f) return -1;
+ if (fread(header, sizeof(PyraviewHeader), 1, f) != 1) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ if (memcmp(header->magic, "PYRA", 4) != 0) return -1;
+ return 0;
+}
diff --git a/src/c/tests/test_main.c b/src/c/tests/test_main.c
index 8d0eff2..512d2b2 100644
--- a/src/c/tests/test_main.c
+++ b/src/c/tests/test_main.c
@@ -29,9 +29,10 @@ int run_test(int type, int layout, int channels, int threads) {
remove(fname);
int steps[] = {10};
+ double startTime = 0.0;
int ret = pyraview_process_chunk(
data, rows, cols, type, layout,
- prefix, 0, steps, 1, 100.0, threads
+ prefix, 0, steps, 1, 100.0, startTime, threads
);
free(data);
diff --git a/src/matlab/PyraviewDataset.m b/src/matlab/PyraviewDataset.m
new file mode 100644
index 0000000..453abdf
--- /dev/null
+++ b/src/matlab/PyraviewDataset.m
@@ -0,0 +1,171 @@
+classdef PyraviewDataset < handle
+ properties
+ FolderPath
+ Files
+ NativeRate
+ StartTime
+ Channels
+ DataType
+ end
+
+ methods
+ function obj = PyraviewDataset(folderPath)
+ if ~isfolder(folderPath)
+ error('Pyraview:InvalidFolder', 'Folder not found: %s', folderPath);
+ end
+ obj.FolderPath = folderPath;
+ obj.Files = struct('decimation', {}, 'rate', {}, 'path', {}, 'start_time', {});
+
+ d = dir(fullfile(folderPath, '*_L*.bin'));
+ if isempty(d)
+ error('Pyraview:NoFiles', 'No Pyraview files found in folder.');
+ end
+
+            % pyraview_get_header_mex must be compiled beforehand (see build_pyraview.m).
+
+ for i = 1:length(d)
+ fullPath = fullfile(d(i).folder, d(i).name);
+ try
+ h = pyraview_get_header_mex(fullPath);
+ if isempty(obj.NativeRate)
+ obj.NativeRate = h.nativeRate;
+ obj.StartTime = h.startTime;
+ obj.Channels = h.channelCount;
+ obj.DataType = h.dataType;
+ end
+
+ idx = length(obj.Files) + 1;
+ obj.Files(idx).decimation = h.decimationFactor;
+ obj.Files(idx).rate = h.sampleRate;
+ obj.Files(idx).path = fullPath;
+ obj.Files(idx).start_time = h.startTime;
+ catch e
+ warning('Failed to parse %s: %s', fullPath, e.message);
+ end
+ end
+
+ if isempty(obj.Files)
+ error('Pyraview:NoFiles', 'No valid Pyraview files loaded.');
+ end
+
+ % Sort by decimation (ascending -> High Res first)
+ [~, I] = sort([obj.Files.decimation]);
+ obj.Files = obj.Files(I);
+ end
+
+ function [tVec, dataOut] = getData(obj, tStart, tEnd, pixels)
+ duration = tEnd - tStart;
+ if duration <= 0
+ tVec = []; dataOut = []; return;
+ end
+
+ targetRate = pixels / duration;
+
+ % Find optimal file
+ % Files are sorted by decimation ASC (High Res -> Low Res)
+ % Rates are DESC (High Rate -> Low Rate)
+ % We want rate >= targetRate, but as low as possible (coarsest sufficient)
+
+ selectedIdx = 1; % Default high res
+ candidates = find([obj.Files.rate] >= targetRate);
+ if ~isempty(candidates)
+                % Files are sorted by decimation ascending, so rates are
+                % descending. The coarsest file that still satisfies the
+                % target rate is the one with the smallest sufficient rate,
+                % i.e. the last entry in candidates.
+ selectedIdx = candidates(end);
+ end
+
+ fileInfo = obj.Files(selectedIdx);
+
+ % Aperture (3x window)
+ tCenter = (tStart + tEnd) / 2;
+ apStart = tCenter - 1.5 * duration;
+ apEnd = tCenter + 1.5 * duration;
+
+ if apStart < obj.StartTime
+ apStart = obj.StartTime;
+ end
+
+ rate = fileInfo.rate;
+ idxStart = floor((apStart - obj.StartTime) * rate);
+ idxEnd = ceil((apEnd - obj.StartTime) * rate);
+
+ if idxStart < 0, idxStart = 0; end
+ if idxEnd <= idxStart
+ tVec = []; dataOut = []; return;
+ end
+
+ numSamples = idxEnd - idxStart;
+
+ % Reading logic (Channel-Major Planar based on C implementation)
+ % File: Header(1024) + [Ch0 Data] + [Ch1 Data] ...
+ % Data size per sample = 2 * ItemSize (Min/Max)
+
+ f = fopen(fileInfo.path, 'rb');
+ fseek(f, 0, 'eof');
+ fileSize = ftell(f);
+
+ % Determine item size
+ switch obj.DataType
+ case 0, dt = 'int8'; itemSize = 1;
+ case 1, dt = 'uint8'; itemSize = 1;
+ case 2, dt = 'int16'; itemSize = 2;
+ case 3, dt = 'uint16'; itemSize = 2;
+ case 4, dt = 'int32'; itemSize = 4;
+ case 5, dt = 'uint32'; itemSize = 4;
+ case 6, dt = 'int64'; itemSize = 8;
+ case 7, dt = 'uint64'; itemSize = 8;
+ case 8, dt = 'single'; itemSize = 4;
+ case 9, dt = 'double'; itemSize = 8;
+ otherwise, error('Unknown type');
+ end
+
+ dataArea = fileSize - 1024;
+ frameSize = obj.Channels * 2 * itemSize;
+            % Data is channel-planar: samplesPerChannel = dataArea / (Channels * 2 * ItemSize)
+ samplesPerChannel = floor(dataArea / (obj.Channels * 2 * itemSize));
+
+ if idxStart >= samplesPerChannel
+ fclose(f);
+ tVec = []; dataOut = []; return;
+ end
+
+ if idxEnd > samplesPerChannel
+ idxEnd = samplesPerChannel;
+ numSamples = idxEnd - idxStart;
+ end
+
+ % Read
+ % Output: [Samples x (Channels*2)]
+ dataOut = zeros(numSamples, obj.Channels * 2, dt);
+
+ for ch = 1:obj.Channels
+ chOffset = 1024 + ((ch-1) * samplesPerChannel * 2 * itemSize);
+ readOffset = chOffset + (idxStart * 2 * itemSize);
+
+ fseek(f, readOffset, 'bof');
+ raw = fread(f, numSamples * 2, ['*' dt]);
+
+ % raw is column vector [Min0; Max0; Min1; Max1...]
+ % We want to map to dataOut columns (2*ch-1) and (2*ch)
+ % MATLAB is 1-based.
+ % Col 1: Min, Col 2: Max for Ch1
+
+ % raw(1:2:end) -> Min
+ % raw(2:2:end) -> Max
+ if ~isempty(raw)
+ dataOut(1:length(raw)/2, (ch-1)*2 + 1) = raw(1:2:end);
+ dataOut(1:length(raw)/2, (ch-1)*2 + 2) = raw(2:2:end);
+ end
+ end
+ fclose(f);
+
+ % Time vector
+ % t = start + (idx / rate)
+ indices = (idxStart : (idxStart + numSamples - 1))';
+ tVec = obj.StartTime + double(indices) / rate;
+ end
+ end
+end
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index a08acb5..cf3bd6e 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -17,7 +17,11 @@
fprintf('Building Pyraview MEX...\n');
try
mex('-v', include_path, src_path, mex_src, omp_flags);
- fprintf('Build successful.\n');
+ fprintf('Build pyraview_mex successful.\n');
+
+ fprintf('Building pyraview_get_header_mex...\n');
+ mex('-v', include_path, src_path, 'pyraview_get_header_mex.c');
+ fprintf('Build pyraview_get_header_mex successful.\n');
catch e
fprintf('Build failed: %s\n', e.message);
end
diff --git a/src/matlab/pyraview_get_header_mex.c b/src/matlab/pyraview_get_header_mex.c
new file mode 100644
index 0000000..56ec790
--- /dev/null
+++ b/src/matlab/pyraview_get_header_mex.c
@@ -0,0 +1,57 @@
+#include "mex.h"
+#include "../../include/pyraview_header.h"
+#include <stdio.h> /* NOTE: header name lost in transit (angle brackets stripped) — verify against original */
+
+/*
+ * pyraview_get_header_mex.c
+ * MEX wrapper for pyraview_get_header
+ *
+ * Usage:
+ * header = pyraview_get_header_mex(filename)
+ *
+ * Inputs:
+ * filename: char array (string).
+ *
+ * Outputs:
+ * header: struct with fields:
+ * version, dataType, channelCount, sampleRate, nativeRate, startTime, decimationFactor
+ */
+
+void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
+ if (nrhs != 1) {
+ mexErrMsgIdAndTxt("Pyraview:InvalidInput", "Usage: pyraview_get_header_mex(filename)");
+ }
+
+ if (!mxIsChar(prhs[0])) {
+ mexErrMsgIdAndTxt("Pyraview:InvalidInput", "Filename must be a string.");
+ }
+ char *filename = mxArrayToString(prhs[0]);
+
+ PyraviewHeader h;
+ if (pyraview_get_header(filename, &h) != 0) {
+ mxFree(filename);
+ mexErrMsgIdAndTxt("Pyraview:ReadError", "Failed to read Pyraview header from %s", filename);
+ }
+ mxFree(filename);
+
+ const char *field_names[] = {
+ "version",
+ "dataType",
+ "channelCount",
+ "sampleRate",
+ "nativeRate",
+ "startTime",
+ "decimationFactor"
+ };
+ int n_fields = 7;
+
+ plhs[0] = mxCreateStructMatrix(1, 1, n_fields, field_names);
+
+ mxSetField(plhs[0], 0, "version", mxCreateDoubleScalar((double)h.version));
+ mxSetField(plhs[0], 0, "dataType", mxCreateDoubleScalar((double)h.dataType));
+ mxSetField(plhs[0], 0, "channelCount", mxCreateDoubleScalar((double)h.channelCount));
+ mxSetField(plhs[0], 0, "sampleRate", mxCreateDoubleScalar(h.sampleRate));
+ mxSetField(plhs[0], 0, "nativeRate", mxCreateDoubleScalar(h.nativeRate));
+ mxSetField(plhs[0], 0, "startTime", mxCreateDoubleScalar(h.startTime));
+ mxSetField(plhs[0], 0, "decimationFactor", mxCreateDoubleScalar((double)h.decimationFactor));
+}
diff --git a/src/matlab/test_dataset.m b/src/matlab/test_dataset.m
new file mode 100644
index 0000000..ac0b39a
--- /dev/null
+++ b/src/matlab/test_dataset.m
@@ -0,0 +1,58 @@
+classdef test_dataset < matlab.unittest.TestCase
+ properties
+ TestDataDir
+ end
+
+ methods(TestMethodSetup)
+ function createData(testCase)
+ testCase.TestDataDir = tempname;
+ mkdir(testCase.TestDataDir);
+
+ % Generate dummy data
+ Fs = 1000;
+ T = 10;
+ t = 0:1/Fs:T-1/Fs;
+ data = [sin(2*pi*t)' .* 1000, t' .* 100];
+ data = int16(data);
+
+ prefix = fullfile(testCase.TestDataDir, 'test_data');
+ steps = [10, 10];
+ start_time = 100.0;
+
+ % Call MEX
+ pyraview_mex(data, prefix, steps, Fs, start_time);
+ end
+ end
+
+ methods(TestMethodTeardown)
+ function removeData(testCase)
+ rmdir(testCase.TestDataDir, 's');
+ end
+ end
+
+ methods(Test)
+ function testConstructor(testCase)
+ ds = PyraviewDataset(testCase.TestDataDir);
+ testCase.verifyEqual(ds.NativeRate, 1000);
+ testCase.verifyEqual(ds.StartTime, 100.0);
+ testCase.verifyEqual(length(ds.Files), 2);
+ end
+
+ function testGetData(testCase)
+ ds = PyraviewDataset(testCase.TestDataDir);
+ t_start = 100.0;
+ t_end = 110.0;
+ pixels = 50; % low resolution
+
+ [t, d] = ds.getData(t_start, t_end, pixels);
+
+ testCase.verifyNotEmpty(t);
+ testCase.verifyEqual(size(d, 2), 4); % 2 ch * 2
+
+ % Check basic values
+ % d(:, 2) is Max Ch0. Should include positive sine peaks (approx 1000)
+ mx = max(d(:, 2));
+ testCase.verifyTrue(mx > 900);
+ end
+ end
+end
diff --git a/src/python/pyraview.py b/src/python/pyraview.py
index 28badd0..66265c2 100644
--- a/src/python/pyraview.py
+++ b/src/python/pyraview.py
@@ -51,11 +51,30 @@ def _find_library():
ctypes.POINTER(ctypes.c_int), # levelSteps
ctypes.c_int, # numLevels
ctypes.c_double, # nativeRate
+ ctypes.c_double, # startTime
ctypes.c_int # numThreads
]
_lib.pyraview_process_chunk.restype = ctypes.c_int
-def process_chunk(data, file_prefix, level_steps, native_rate, append=False, layout='SxC', num_threads=0):
+# Define Header Struct
+class PyraviewHeader(ctypes.Structure):
+ _pack_ = 64
+ _fields_ = [
+ ("magic", ctypes.c_char * 4),
+ ("version", ctypes.c_uint32),
+ ("dataType", ctypes.c_uint32),
+ ("channelCount", ctypes.c_uint32),
+ ("sampleRate", ctypes.c_double),
+ ("nativeRate", ctypes.c_double),
+ ("startTime", ctypes.c_double),
+ ("decimationFactor", ctypes.c_uint32),
+ ("reserved", ctypes.c_uint8 * 980)
+ ]
+
+_lib.pyraview_get_header.argtypes = [ctypes.c_char_p, ctypes.POINTER(PyraviewHeader)]
+_lib.pyraview_get_header.restype = ctypes.c_int
+
+def process_chunk(data, file_prefix, level_steps, native_rate, start_time=0.0, append=False, layout='SxC', num_threads=0):
"""
Process a chunk of data and append to pyramid files.
@@ -64,6 +83,7 @@ def process_chunk(data, file_prefix, level_steps, native_rate, append=False, lay
file_prefix (str): Base name for output files (e.g. "data/myfile").
level_steps (list[int]): Decimation factors for each level (e.g. [100, 10, 10]).
native_rate (float): Original sampling rate.
+ start_time (float): Start time of the recording.
append (bool): If True, append to existing files. If False, create new.
layout (str): 'SxC' (Sample-Major) or 'CxS' (Channel-Major). Default 'SxC'.
num_threads (int): Number of threads (0 for auto).
@@ -128,6 +148,7 @@ def process_chunk(data, file_prefix, level_steps, native_rate, append=False, lay
c_level_steps,
ctypes.c_int(len(level_steps)),
ctypes.c_double(native_rate),
+ ctypes.c_double(start_time),
ctypes.c_int(num_threads)
)
@@ -135,3 +156,223 @@ def process_chunk(data, file_prefix, level_steps, native_rate, append=False, lay
raise RuntimeError(f"Pyraview processing failed with code {ret}")
return ret
+
+class PyraviewDataset:
+ def __init__(self, folder_path):
+ """
+ Initialize dataset by scanning the folder for pyramid files.
+ """
+ self.folder_path = folder_path
+ self.files = [] # list of dicts: {level, decimation, rate, path, start_time}
+ self.native_rate = None
+ self.start_time = None
+ self.channels = None
+ self.data_type = None
+
+ if not os.path.exists(folder_path):
+ raise FileNotFoundError(f"Folder not found: {folder_path}")
+
+ # Scan for _L*.bin files
+ for f in os.listdir(folder_path):
+ if f.endswith(".bin") and "_L" in f:
+ full_path = os.path.join(folder_path, f)
+ h = PyraviewHeader()
+ if _lib.pyraview_get_header(full_path.encode('utf-8'), ctypes.byref(h)) == 0:
+ # The level number is encoded in the filename (prefix_L{level}.bin),
+ # but the header's decimationFactor is authoritative, so levels are
+ # ordered by decimationFactor below instead of parsing the name.
+
+ if self.native_rate is None:
+ self.native_rate = h.nativeRate
+ self.start_time = h.startTime
+ self.channels = h.channelCount
+ self.data_type = h.dataType
+
+ # Store info
+ self.files.append({
+ 'decimation': h.decimationFactor,
+ 'rate': h.sampleRate,
+ 'path': full_path,
+ 'start_time': h.startTime
+ })
+
+ if not self.files:
+ raise RuntimeError("No valid Pyraview files found in folder.")
+
+ # Sort by decimation (ascending) -> High res to low res
+ self.files.sort(key=lambda x: x['decimation'])
+
+ def get_view_data(self, t_start, t_end, pixels):
+ """
+ Get data for a time range, optimizing for pixel width.
+ Returns (time_vector, data_matrix).
+ """
+ duration = t_end - t_start
+ if duration <= 0:
+ return np.array([]), np.array([])
+
+ # Required sample rate to satisfy pixels
+ # We want approx 'pixels' samples in 'duration'
+ # target_rate = pixels / duration
+ # Each aggregated sample is a min/max pair, and one pair can drive one
+ # pixel column of a plot. We therefore want at least 'pixels' aggregated
+ # samples across the requested duration, which means the selected level
+ # must have a sample rate of at least pixels / duration.
+ # (If no stored level is fine enough, we fall back to the highest resolution.)
+
+ target_rate = pixels / duration
+
+ # Find best level
+ selected_file = self.files[0] # Default to highest res
+ for f in self.files:
+ # NOTE(review): this loop is a no-op leftover from an earlier draft;
+ # the candidate filtering below performs the actual selection.
+ # Goal: pick the coarsest level (highest decimation) whose sample
+ # rate still satisfies target_rate. Remove this loop in a follow-up.
+ pass
+
+ # Better: Filter for files with rate >= target_rate, then pick the one with lowest rate (highest decimation)
+ candidates = [f for f in self.files if f['rate'] >= target_rate]
+ if candidates:
+ # Pick the one with the lowest rate (highest decimation) among candidates
+ # This gives us the coarsest level that still meets the requirement
+ selected_file = min(candidates, key=lambda x: x['rate'])
+ else:
+ # If none meet requirement (zoomed in too far), pick highest res (index 0)
+ selected_file = self.files[0]
+
+ # Calculate aperture (3x window)
+ window = duration
+ t_center = (t_start + t_end) / 2
+ aperture_start = t_center - 1.5 * window
+ aperture_end = t_center + 1.5 * window
+
+ # Clamp to file bounds?
+ # We don't know file duration from header easily without file size.
+ # But start_time is known.
+ if aperture_start < self.start_time:
+ aperture_start = self.start_time
+
+ # Convert time to sample indices
+ # Index = (t - start_time) * sample_rate
+ rel_start = aperture_start - self.start_time
+ rel_end = aperture_end - self.start_time
+
+ idx_start = int(rel_start * selected_file['rate'])
+ idx_end = int(rel_end * selected_file['rate'])
+
+ if idx_start < 0: idx_start = 0
+ if idx_end <= idx_start: return np.array([]), np.array([])
+
+ num_samples_to_read = idx_end - idx_start
+
+ # Map file
+ # Header is 1024 bytes.
+ # Data size depends on type.
+ dtype_map_rev = {
+ 0: np.int8, 1: np.uint8,
+ 2: np.int16, 3: np.uint16,
+ 4: np.int32, 5: np.uint32,
+ 6: np.int64, 7: np.uint64,
+ 8: np.float32, 9: np.float64
+ }
+ dt = dtype_map_rev.get(self.data_type, np.float64)
+ item_size = np.dtype(dt).itemsize
+
+ # On-disk layout (per the writer in pyraview.c): the writer loops over
+ # channels and fwrite()s each channel's full min/max stream in turn,
+ # so a file produced by a single process_chunk call is channel-major
+ # (planar):
+ #
+ #   [1024-byte header][Ch0 min,max,min,max...][Ch1 ...]...
+ #
+ # CAVEAT: if process_chunk is called repeatedly with append=True, the
+ # data area becomes chunk-interleaved instead:
+ #
+ #   [Header]
+ #   [Ch0_chunk1][Ch1_chunk1]...
+ #   [Ch0_chunk2][Ch1_chunk2]...
+ #
+ # In that case random access by time would require an index, which the
+ # format does not provide. This reader therefore assumes the
+ # single-write (offline conversion) case.
+ #
+ # Sizing: each aggregated sample stores a min and a max, so
+ #
+ #   file_size = 1024 + channels * samples_per_channel * 2 * item_size
+ #
+ # which lets us recover samples_per_channel from the file size alone
+ # (computed below); no per-file sample count is stored in the header.
+ # TODO(review): confirm append=True is never combined with Dataset reads.
+
+ file_size = os.path.getsize(selected_file['path'])
+ data_area = file_size - 1024
+ frame_size = self.channels * 2 * item_size # 2 for min/max
+ total_samples = data_area // frame_size # This assumes interleaved SxC or Blocked CxS?
+
+ # samples_per_channel is recovered from the file size because the
+ # header stores no sample count:
+ #   samples_per_channel = (file_size - 1024) // (channels * 2 * item_size)
+ # NOTE(review): 'total_samples' computed above is unused and duplicates
+ # this value; it can be deleted.
+
+ samples_per_channel = data_area // (self.channels * 2 * item_size)
+
+ if idx_start >= samples_per_channel:
+ return np.array([]), np.array([])
+
+ if idx_end > samples_per_channel:
+ idx_end = samples_per_channel
+ num_samples_to_read = idx_end - idx_start
+
+ # We need to read 'num_samples_to_read' from EACH channel.
+ # Ch0 Offset = 1024 + idx_start * 2 * item_size
+ # Ch1 Offset = 1024 + (samples_per_channel * 2 * item_size) + (idx_start * 2 * item_size)
+
+ # Read logic
+ data_out = np.zeros((num_samples_to_read, self.channels * 2), dtype=dt)
+
+ with open(selected_file['path'], 'rb') as f:
+ for ch in range(self.channels):
+ # Calculate offset
+ ch_start_offset = 1024 + (ch * samples_per_channel * 2 * item_size)
+ read_offset = ch_start_offset + (idx_start * 2 * item_size)
+
+ f.seek(read_offset)
+ raw = f.read(num_samples_to_read * 2 * item_size)
+ # Parse
+ ch_data = np.frombuffer(raw, dtype=dt)
+
+ # Output format: rows are samples, columns are channels*2 laid out
+ # as (ch0_min, ch0_max, ch1_min, ch1_max, ...). Each channel's flat
+ # min/max stream is split into its two output columns:
+ #   data_out[:, 2*ch]   <- mins
+ #   data_out[:, 2*ch+1] <- maxes
+ # (The actual assignment, bounds-checked for short reads, is below.)
+
+ # Check bounds (short read?)
+ read_len = len(ch_data)
+ if read_len > 0:
+ # A short read near EOF can yield fewer values than requested, so
+ # clamp to the number of values actually read before reshaping;
+ # ch_data is a flat stream: min0, max0, min1, max1, ...
+ # Rows beyond a short read stay zero-filled in data_out.
+
+ # Ensure alignment
+ limit = min(num_samples_to_read * 2, read_len)
+ # We have 'limit' values.
+ # We need to distribute them.
+ # data_out is (N, C*2).
+ # We want data_out[:, 2*ch] and data_out[:, 2*ch+1]
+
+ # Reshape ch_data to (-1, 2)
+ pairs = ch_data[:limit].reshape(-1, 2)
+ rows = pairs.shape[0]
+ data_out[:rows, 2*ch] = pairs[:, 0]
+ data_out[:rows, 2*ch+1] = pairs[:, 1]
+
+ # Time vector
+ # t = start_time + (idx_start + i) / rate
+ t_vec = self.start_time + (idx_start + np.arange(num_samples_to_read)) / selected_file['rate']
+
+ return t_vec, data_out
diff --git a/src/python/tests/test_dataset.py b/src/python/tests/test_dataset.py
new file mode 100644
index 0000000..ccc67c9
--- /dev/null
+++ b/src/python/tests/test_dataset.py
@@ -0,0 +1,93 @@
+import unittest
+import numpy as np
+import os
+import shutil
+import sys
+import tempfile
+
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+import pyraview
+
+class TestPyraviewDataset(unittest.TestCase):
+ def setUp(self):
+ self.test_dir = tempfile.mkdtemp()
+ self.prefix = os.path.join(self.test_dir, "test_data")
+ self.start_time = 100.0
+ self.rate = 1000.0
+
+ # Create dummy data
+ # 2 channels, 10000 samples.
+ # Ch0: Sine wave 1Hz
+ # Ch1: Ramp
+ t = np.arange(10000) / self.rate
+ ch0 = (np.sin(2 * np.pi * t) * 1000).astype(np.int16)
+ ch1 = (t * 100).astype(np.int16)
+ data = np.stack([ch0, ch1], axis=1)
+
+ # Process chunk
+ # Levels: [10, 10] -> L1 (100Hz), L2 (10Hz)
+ steps = [10, 10]
+ pyraview.process_chunk(data, self.prefix, steps, self.rate, start_time=self.start_time)
+
+ self.dataset = pyraview.PyraviewDataset(self.test_dir)
+
+ def tearDown(self):
+ shutil.rmtree(self.test_dir)
+
+ def test_metadata(self):
+ self.assertEqual(self.dataset.native_rate, self.rate)
+ self.assertEqual(self.dataset.start_time, self.start_time)
+ self.assertEqual(self.dataset.channels, 2)
+ # Check levels
+ # Should have L1 (dec 10) and L2 (dec 100)
+ self.assertEqual(len(self.dataset.files), 2)
+ self.assertEqual(self.dataset.files[0]['decimation'], 10)
+ self.assertEqual(self.dataset.files[1]['decimation'], 100)
+
+ def test_get_view_data(self):
+ # Request full duration (10s) with low pixel count -> should pick L2
+ t_start = self.start_time
+ t_end = self.start_time + 10.0
+ pixels = 50 # 50 pixels for 10s -> 5Hz required. L2 is 10Hz. L2 should be picked.
+
+ t, data = self.dataset.get_view_data(t_start, t_end, pixels)
+
+ # L2 Rate is 10Hz. 10s -> 100 samples.
+ # get_view_data reads a 3x aperture around the requested window:
+ # window = 10 s, center = 105, so the aperture spans t = 90..120.
+ # The file covers t = 100..110. The aperture start is clamped to
+ # start_time (100); the end is not clamped explicitly, but the read
+ # is bounded by samples_per_channel derived from the file size, so
+ # the effective range is 100..110.
+ # At the L2 rate of 10 Hz that is at most 100 samples.
+
+ # Expectation: We read from 100.0 to 110.0 (end of file).
+ # L2 has 100 samples.
+ self.assertTrue(len(t) > 0)
+ self.assertTrue(len(t) <= 100) # Could be less if aperture calculation aligns differently
+
+ # Check content roughly
+ # Ch0 L2 should be decimated sine. Min/Max around -1000/1000
+ # Data format is [Min0 Max0 Min1 Max1]
+
+ # Verify columns
+ self.assertEqual(data.shape[1], 4) # 2 channels * 2
+
+ def test_zoom_in(self):
+ # Request small duration (1s) with high pixels -> should pick L1
+ t_start = self.start_time + 1.0
+ t_end = self.start_time + 2.0
+ pixels = 200 # 200 Hz required. L1 is 100Hz. Native is 1000Hz.
+ # Target rate is 200 Hz but L1 is only 100 Hz, so no stored level
+ # satisfies rate >= target. get_view_data then falls back to the
+ # highest-resolution stored level, files[0] (L1 at 100 Hz); the
+ # native 1000 Hz data is not stored and cannot be returned.
+
+ t, data = self.dataset.get_view_data(t_start, t_end, pixels)
+
+ # Should get L1 data.
+ # 1s duration. 100Hz. Approx 100 samples (maybe 3x due to aperture).
+ self.assertTrue(len(t) > 0)
+
+if __name__ == '__main__':
+ unittest.main()
From a2992d414d51d838c31e1d29aac6c284a8fef9fe Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Mon, 23 Feb 2026 14:20:11 +0000
Subject: [PATCH 03/10] Implement PyraviewDataset and update packaging
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class with `get_view_data` for optimal level selection.
- MATLAB: Add `+pyraview.Dataset` class and `pyraview_get_header_mex.c`.
- Workflow: Fix Windows zip failures and enable manual release triggers.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
---
.github/workflows/build_and_release.yml | 57 ++++++++-----------
.../Dataset.m} | 6 +-
.../{ => +pyraview}/pyraview_get_header_mex.c | 0
src/matlab/{ => +pyraview}/pyraview_mex.c | 0
src/matlab/build_pyraview.m | 13 ++++-
.../+pyraview/+unittest/TestDataset.m} | 8 +--
.../{pyraview.py => pyraview/__init__.py} | 4 +-
src/python/tests/test_dataset.py | 2 +-
8 files changed, 45 insertions(+), 45 deletions(-)
rename src/matlab/{PyraviewDataset.m => +pyraview/Dataset.m} (97%)
rename src/matlab/{ => +pyraview}/pyraview_get_header_mex.c (100%)
rename src/matlab/{ => +pyraview}/pyraview_mex.c (100%)
rename src/matlab/{test_dataset.m => tests/+pyraview/+unittest/TestDataset.m} (86%)
rename src/python/{pyraview.py => pyraview/__init__.py} (99%)
diff --git a/.github/workflows/build_and_release.yml b/.github/workflows/build_and_release.yml
index fa00ae2..45d4c7b 100644
--- a/.github/workflows/build_and_release.yml
+++ b/.github/workflows/build_and_release.yml
@@ -130,46 +130,39 @@ jobs:
GITHUB_REF_NAME: ${{ github.ref_name }}
with:
command: |
- % 1. Use a fixed, permanent UUID for the project
- % This ensures MATLAB treats every build as an update to the same toolbox
+ % 1. Define metadata
+ toolboxName = 'Pyraview';
+ % Use the fixed GUID we agreed upon
guid = '6e14a2b9-7f3c-4d8e-9a1b-3c5d7e9f2a4b';
- % 2. Grab the version from the GitHub Tag environment variable
- % Default to 1.0.0 if not running in a tagged action
+ % 2. Get Version from Environment
version = getenv('GITHUB_REF_NAME');
if isempty(version) || ~startsWith(version, 'v')
- version = '1.0.0';
+ version = '0.1.6'; % Fallback
else
- % Remove the 'v' prefix (e.g., 'v1.2.3' -> '1.2.3')
version = erase(version, 'v');
end
- % 3. Create a structurally complete, valid Toolbox PRJ XML
- xmlCode = [...
- '', ...
- '', ...
- '', ...
- 'Pyraview', ...
- 'Pyraview Team', ...
- 'High-performance decimation engine.', ...
- '' version '', ...
- '${PROJECT_ROOT}/Pyraview.mltbx', ...
- '' guid '', ...
- '${PROJECT_ROOT}/src/matlab', ...
- '${PROJECT_ROOT}/src/matlab', ...
- '${PROJECT_ROOT}/Pyraview.mltbx', ...
- '', ...
- '/usr/local/matlab', ...
- ''];
-
- % 4. Write the file
- fid = fopen('pyraview.prj', 'w');
- fprintf(fid, '%s', xmlCode);
- fclose(fid);
-
- % 5. Package using the file directly
- fprintf('Packaging Pyraview version %s with GUID %s...\n', version, guid);
- matlab.addons.toolbox.packageToolbox('pyraview.prj', 'Pyraview.mltbx');
+ % 3. Initialize Options from the MATLAB source folder
+ % This creates the object without needing a .prj file on disk yet
+ opts = matlab.addons.toolbox.ToolboxOptions(fullfile(pwd, 'src', 'matlab'), guid);
+
+ % 4. Set the Required Fields
+ opts.ToolboxName = toolboxName;
+ opts.ToolboxVersion = version;
+ % opts.ToolboxIdentifier = guid; % Passed in constructor
+ opts.AuthorName = 'Pyraview Team';
+ opts.AuthorEmail = '';
+ opts.Description = 'High-performance multi-resolution decimation engine.';
+ opts.OutputFile = fullfile(pwd, 'Pyraview.mltbx');
+
+ % 5. Map the files
+ % We want the root of the toolbox to be the src/matlab folder
+ opts.ToolboxFiles = {fullfile(pwd, 'src', 'matlab')};
+
+ % 6. Package it
+ fprintf('Packaging %s v%s [%s]...\n', toolboxName, version, guid);
+ matlab.addons.toolbox.packageToolbox(opts);
# Upload the .mltbx as an artifact so the release job can pick it up
- name: Upload Toolbox Artifact
diff --git a/src/matlab/PyraviewDataset.m b/src/matlab/+pyraview/Dataset.m
similarity index 97%
rename from src/matlab/PyraviewDataset.m
rename to src/matlab/+pyraview/Dataset.m
index 453abdf..244586b 100644
--- a/src/matlab/PyraviewDataset.m
+++ b/src/matlab/+pyraview/Dataset.m
@@ -1,4 +1,4 @@
-classdef PyraviewDataset < handle
+classdef Dataset < handle
properties
FolderPath
Files
@@ -9,7 +9,7 @@
end
methods
- function obj = PyraviewDataset(folderPath)
+ function obj = Dataset(folderPath)
if ~isfolder(folderPath)
error('Pyraview:InvalidFolder', 'Folder not found: %s', folderPath);
end
@@ -26,7 +26,7 @@
for i = 1:length(d)
fullPath = fullfile(d(i).folder, d(i).name);
try
- h = pyraview_get_header_mex(fullPath);
+ h = pyraview.pyraview_get_header_mex(fullPath);
if isempty(obj.NativeRate)
obj.NativeRate = h.nativeRate;
obj.StartTime = h.startTime;
diff --git a/src/matlab/pyraview_get_header_mex.c b/src/matlab/+pyraview/pyraview_get_header_mex.c
similarity index 100%
rename from src/matlab/pyraview_get_header_mex.c
rename to src/matlab/+pyraview/pyraview_get_header_mex.c
diff --git a/src/matlab/pyraview_mex.c b/src/matlab/+pyraview/pyraview_mex.c
similarity index 100%
rename from src/matlab/pyraview_mex.c
rename to src/matlab/+pyraview/pyraview_mex.c
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index cf3bd6e..6585424 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -1,10 +1,14 @@
% build_pyraview.m
% Build script for Pyraview MEX
+% Paths relative to src/matlab/
src_path = '../../src/c/pyraview.c';
-mex_src = 'pyraview_mex.c';
include_path = '-I../../include';
+% Source files inside +pyraview
+mex_src = '+pyraview/pyraview_mex.c';
+header_src = '+pyraview/pyraview_get_header_mex.c';
+
% OpenMP flags (adjust for OS/Compiler)
if ispc
% Windows MSVC usually supports /openmp
@@ -14,13 +18,16 @@
omp_flags = 'CFLAGS="$CFLAGS -fopenmp" LDFLAGS="$LDFLAGS -fopenmp"';
end
+% Output directory: +pyraview/
+out_dir = '+pyraview';
+
fprintf('Building Pyraview MEX...\n');
try
- mex('-v', include_path, src_path, mex_src, omp_flags);
+ mex('-v', '-outdir', out_dir, include_path, src_path, mex_src, omp_flags);
fprintf('Build pyraview_mex successful.\n');
fprintf('Building pyraview_get_header_mex...\n');
- mex('-v', include_path, src_path, 'pyraview_get_header_mex.c');
+ mex('-v', '-outdir', out_dir, include_path, src_path, header_src);
fprintf('Build pyraview_get_header_mex successful.\n');
catch e
fprintf('Build failed: %s\n', e.message);
diff --git a/src/matlab/test_dataset.m b/src/matlab/tests/+pyraview/+unittest/TestDataset.m
similarity index 86%
rename from src/matlab/test_dataset.m
rename to src/matlab/tests/+pyraview/+unittest/TestDataset.m
index ac0b39a..5063bcd 100644
--- a/src/matlab/test_dataset.m
+++ b/src/matlab/tests/+pyraview/+unittest/TestDataset.m
@@ -1,4 +1,4 @@
-classdef test_dataset < matlab.unittest.TestCase
+classdef TestDataset < matlab.unittest.TestCase
properties
TestDataDir
end
@@ -20,7 +20,7 @@ function createData(testCase)
start_time = 100.0;
% Call MEX
- pyraview_mex(data, prefix, steps, Fs, start_time);
+ pyraview.pyraview_mex(data, prefix, steps, Fs, start_time);
end
end
@@ -32,14 +32,14 @@ function removeData(testCase)
methods(Test)
function testConstructor(testCase)
- ds = PyraviewDataset(testCase.TestDataDir);
+ ds = pyraview.Dataset(testCase.TestDataDir);
testCase.verifyEqual(ds.NativeRate, 1000);
testCase.verifyEqual(ds.StartTime, 100.0);
testCase.verifyEqual(length(ds.Files), 2);
end
function testGetData(testCase)
- ds = PyraviewDataset(testCase.TestDataDir);
+ ds = pyraview.Dataset(testCase.TestDataDir);
t_start = 100.0;
t_end = 110.0;
pixels = 50; % low resolution
diff --git a/src/python/pyraview.py b/src/python/pyraview/__init__.py
similarity index 99%
rename from src/python/pyraview.py
rename to src/python/pyraview/__init__.py
index 66265c2..52bb947 100644
--- a/src/python/pyraview.py
+++ b/src/python/pyraview/__init__.py
@@ -7,7 +7,7 @@
def _find_library():
# Priority:
# 1. Environment variable PYRAVIEW_LIB
- # 2. Relative to this file: ../c/libpyraview.so (dev structure)
+ # 2. Relative to this file: ../../c/libpyraview.so (dev structure)
# 3. Current working directory: ./libpyraview.so
lib_name = "libpyraview.so"
@@ -22,7 +22,7 @@ def _find_library():
# Relative to this file
this_dir = os.path.dirname(os.path.abspath(__file__))
- rel_path = os.path.join(this_dir, "..", "c", lib_name)
+ rel_path = os.path.join(this_dir, "..", "..", "c", lib_name)
if os.path.exists(rel_path):
return rel_path
diff --git a/src/python/tests/test_dataset.py b/src/python/tests/test_dataset.py
index ccc67c9..9e22108 100644
--- a/src/python/tests/test_dataset.py
+++ b/src/python/tests/test_dataset.py
@@ -8,7 +8,7 @@
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import pyraview
-class TestPyraviewDataset(unittest.TestCase):
+class TestDataset(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
self.prefix = os.path.join(self.test_dir, "test_data")
From d7b8b2203bf43c377a5b8927b6a39704a967c89c Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 01:27:51 +0000
Subject: [PATCH 04/10] Implement PyraviewDataset, update core, and fix
workflows
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class with `get_view_data`.
- MATLAB: Add `+pyraview.Dataset` class and `pyraview_get_header_mex.c`.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
---
.../{build_and_release.yml => release.yml} | 48 ++++---------
.github/workflows/test.yml | 72 +++++++++++++++++++
2 files changed, 84 insertions(+), 36 deletions(-)
rename .github/workflows/{build_and_release.yml => release.yml} (71%)
create mode 100644 .github/workflows/test.yml
diff --git a/.github/workflows/build_and_release.yml b/.github/workflows/release.yml
similarity index 71%
rename from .github/workflows/build_and_release.yml
rename to .github/workflows/release.yml
index 45d4c7b..5e91577 100644
--- a/.github/workflows/build_and_release.yml
+++ b/.github/workflows/release.yml
@@ -1,16 +1,15 @@
-name: Build and Release Pyraview
+name: Release Pyraview
on:
+ release:
+ types: [published]
push:
- branches: [ main ]
tags: [ 'v*' ]
- pull_request:
- branches: [ main ]
- workflow_dispatch: # Allows manual triggering
+ workflow_dispatch:
jobs:
build_and_test:
- name: Build & Test (${{ matrix.os }})
+ name: Build & Package (${{ matrix.os }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
@@ -36,15 +35,8 @@ jobs:
- name: Build All
run: cmake --build build --parallel
- - name: Run C Tests
- shell: bash
- run: |
- cd build
- ctest --output-on-failure
-
# Zip binaries for release (naming by OS/Architecture)
- name: Package Binaries
- if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
shell: bash
run: |
mkdir -p dist
@@ -57,15 +49,13 @@ jobs:
fi
- name: Upload Artifacts
- if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: binaries-${{ matrix.os }}
path: dist/*
build-matlab:
- name: Build & Test Matlab (${{ matrix.os }})
- needs: build_and_test
+ name: Build Matlab MEX (${{ matrix.os }})
runs-on: ${{ matrix.os }}
strategy:
matrix:
@@ -81,32 +71,19 @@ jobs:
- name: Compile MEX
uses: matlab-actions/run-command@v2
with:
- # Enable OpenMP if supported by the platform (simple check or flag)
- # For Ubuntu (GCC): -fopenmp
- # For Windows (MSVC): -openmp (or implied via /openmp)
- # For macOS (Clang): -Xpreprocessor -fopenmp -lomp (but requires libomp)
- # To keep it simple and avoid linker errors on stock runners without libomp, we skip explicit OMP flags for now or use safe defaults.
- # But pyraview.c has #include . If we don't link OMP, it might fail if _OPENMP is defined by default but library isn't linked.
- # Let's try compiling WITHOUT flags first, relying on the source's #ifdef _OPENMP guards.
- command: mex -v src/matlab/pyraview_mex.c src/c/pyraview.c -Iinclude -output src/matlab/pyraview
-
- - name: Run Matlab Tests
- uses: matlab-actions/run-tests@v2
- with:
- select-by-folder: src/matlab
+ # Run the updated build script which handles paths correctly
+ command: cd('src/matlab'); build_pyraview;
- name: Upload MEX Artifact
- if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: mex-${{ matrix.os }}
- path: src/matlab/pyraview.* # Matches pyraview.mexw64, .mexa64, etc.
+ path: src/matlab/+pyraview/*.mex* # Capture all MEX files in the package
if-no-files-found: error
package-matlab:
name: Package Matlab Toolbox
needs: build-matlab
- if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -119,7 +96,7 @@ jobs:
- name: Download all MEX artifacts
uses: actions/download-artifact@v4
with:
- path: src/matlab
+ path: src/matlab/+pyraview
pattern: mex-*
merge-multiple: true
@@ -145,12 +122,12 @@ jobs:
% 3. Initialize Options from the MATLAB source folder
% This creates the object without needing a .prj file on disk yet
- opts = matlab.addons.toolbox.ToolboxOptions(fullfile(pwd, 'src', 'matlab'), guid);
+ opts = matlab.addons.toolbox.ToolboxOptions(fullfile(pwd, 'src', 'matlab'));
% 4. Set the Required Fields
opts.ToolboxName = toolboxName;
opts.ToolboxVersion = version;
- % opts.ToolboxIdentifier = guid; % Passed in constructor
+ opts.ToolboxIdentifier = guid;
opts.AuthorName = 'Pyraview Team';
opts.AuthorEmail = '';
opts.Description = 'High-performance multi-resolution decimation engine.';
@@ -174,7 +151,6 @@ jobs:
release:
name: Create GitHub Release
needs: [build_and_test, package-matlab]
- if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
permissions:
contents: write # Required to create releases
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..218be4d
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,72 @@
+name: Test Pyraview
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ workflow_dispatch:
+
+jobs:
+ build_and_test:
+ name: Build & Test (${{ matrix.os }})
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+
+ - name: Setup CMake
+ uses: lukka/get-cmake@latest
+
+ - name: Configure CMake
+ shell: bash
+ run: |
+ mkdir build
+ if [ "${{ runner.os }}" == "Windows" ]; then
+ cmake -S . -B build -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=Release
+ else
+ cmake -S . -B build -DCMAKE_BUILD_TYPE=Release
+ fi
+
+ - name: Build All
+ run: cmake --build build --parallel
+
+ - name: Run C Tests
+ shell: bash
+ run: |
+ cd build
+ ctest --output-on-failure
+
+ build-matlab:
+ name: Build & Test Matlab (${{ matrix.os }})
+ needs: build_and_test
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ # Matlab actions support limited OS versions, check availability
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: matlab-actions/setup-matlab@v2
+ with:
+ release: 'R2024b'
+
+ - name: Compile MEX
+ uses: matlab-actions/run-command@v2
+ with:
+ # Run the updated build script which handles paths correctly
+ command: cd('src/matlab'); build_pyraview;
+
+ - name: Run Matlab Tests
+ uses: matlab-actions/run-command@v2
+ with:
+ command: |
+ addpath('src/matlab');
+ addpath('src/matlab/tests');
+ results = runtests('pyraview.unittest.TestDataset');
+ assert(~any([results.Failed]));
From 4a69923af98d20c48679aecd91971c7a05ec631f Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 01:41:31 +0000
Subject: [PATCH 05/10] Implement PyraviewDataset, update workflows, and fix
MATLAB paths
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class with `get_view_data`. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class and `pyraview_get_header_mex.c`. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB paths in tests and ensure build errors fail CI.
---
src/matlab/build_pyraview.m | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index 6585424..5e5ef38 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -31,4 +31,5 @@
fprintf('Build pyraview_get_header_mex successful.\n');
catch e
fprintf('Build failed: %s\n', e.message);
+ rethrow(e);
end
From 876a8c794f45469e20355e21fb7ffb2512d90deb Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 02:08:59 +0000
Subject: [PATCH 06/10] Implement PyraviewDataset, update workflows, and fix
MATLAB packaging
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB build flags and paths.
---
src/matlab/build_pyraview.m | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index 5e5ef38..08bfd2d 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -15,7 +15,11 @@
omp_flags = 'COMPFLAGS="$COMPFLAGS /openmp"';
else
% GCC/Clang
+ % We need to pass the flags as separate arguments or correctly formatted
omp_flags = 'CFLAGS="$CFLAGS -fopenmp" LDFLAGS="$LDFLAGS -fopenmp"';
+ % The previous attempt caused quoting issues.
+ % Trying with simpler quoting for linux runner environment
+ omp_flags = 'CFLAGS=''$CFLAGS -fopenmp'' LDFLAGS=''$LDFLAGS -fopenmp''';
end
% Output directory: +pyraview/
From 69500da81bac7526c9b688135dd8903381c1bc6b Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 02:17:24 +0000
Subject: [PATCH 07/10] Implement PyraviewDataset, update header, and refactor
workflows
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB build flags and paths.
---
src/matlab/+pyraview/pyraview_get_header_mex.c | 2 +-
src/matlab/+pyraview/pyraview_mex.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/matlab/+pyraview/pyraview_get_header_mex.c b/src/matlab/+pyraview/pyraview_get_header_mex.c
index 56ec790..61edf88 100644
--- a/src/matlab/+pyraview/pyraview_get_header_mex.c
+++ b/src/matlab/+pyraview/pyraview_get_header_mex.c
@@ -1,5 +1,5 @@
#include "mex.h"
-#include "../../include/pyraview_header.h"
+#include "pyraview_header.h"
#include
/*
diff --git a/src/matlab/+pyraview/pyraview_mex.c b/src/matlab/+pyraview/pyraview_mex.c
index 3306d15..6afd82f 100644
--- a/src/matlab/+pyraview/pyraview_mex.c
+++ b/src/matlab/+pyraview/pyraview_mex.c
@@ -1,5 +1,5 @@
#include "mex.h"
-#include "../../include/pyraview_header.h"
+#include "pyraview_header.h"
#include <string.h>
/*
From 31f754c7b4ef4157c56f9baaa11dc5476a53ce33 Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 13:05:15 +0000
Subject: [PATCH 08/10] Implement PyraviewDataset, update workflows, and fix
MATLAB builds
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB build flags and paths.
---
src/matlab/build_pyraview.m | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index 08bfd2d..37565e7 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -12,14 +12,11 @@
% OpenMP flags (adjust for OS/Compiler)
if ispc
% Windows MSVC usually supports /openmp
- omp_flags = 'COMPFLAGS="$COMPFLAGS /openmp"';
+ omp_flags = {'COMPFLAGS="$COMPFLAGS /openmp"'};
else
% GCC/Clang
- % We need to pass the flags as separate arguments or correctly formatted
- omp_flags = 'CFLAGS="$CFLAGS -fopenmp" LDFLAGS="$LDFLAGS -fopenmp"';
- % The previous attempt caused quoting issues.
- % Trying with simpler quoting for linux runner environment
- omp_flags = 'CFLAGS=''$CFLAGS -fopenmp'' LDFLAGS=''$LDFLAGS -fopenmp''';
+ % Pass as separate arguments to avoid quoting issues
+ omp_flags = {'CFLAGS="$CFLAGS -fopenmp"', 'LDFLAGS="$LDFLAGS -fopenmp"'};
end
% Output directory: +pyraview/
@@ -27,7 +24,7 @@
fprintf('Building Pyraview MEX...\n');
try
- mex('-v', '-outdir', out_dir, include_path, src_path, mex_src, omp_flags);
+ mex('-v', '-outdir', out_dir, include_path, src_path, mex_src, omp_flags{:});
fprintf('Build pyraview_mex successful.\n');
fprintf('Building pyraview_get_header_mex...\n');
From c974f8939654e11d5aec77d71794da618899592f Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 13:23:48 +0000
Subject: [PATCH 09/10] Implement PyraviewDataset, update workflows, and fix
MATLAB builds
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB build flags and output names.
---
src/matlab/build_pyraview.m | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index 37565e7..346526f 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -24,11 +24,11 @@
fprintf('Building Pyraview MEX...\n');
try
- mex('-v', '-outdir', out_dir, include_path, src_path, mex_src, omp_flags{:});
+ mex('-v', '-outdir', out_dir, '-output', 'pyraview_mex', include_path, src_path, mex_src, omp_flags{:});
fprintf('Build pyraview_mex successful.\n');
fprintf('Building pyraview_get_header_mex...\n');
- mex('-v', '-outdir', out_dir, include_path, src_path, header_src);
+ mex('-v', '-outdir', out_dir, '-output', 'pyraview_get_header_mex', include_path, src_path, header_src);
fprintf('Build pyraview_get_header_mex successful.\n');
catch e
fprintf('Build failed: %s\n', e.message);
From 80c3acfd467332a7ba739e6efce0395e4272b050 Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]"
<161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 13:46:07 +0000
Subject: [PATCH 10/10] Implement PyraviewDataset, update workflows, and fix
MATLAB builds
- Core: Add `startTime` to Pyraview header (1024 bytes) and update validation logic.
- Core: Implement `pyraview_get_header` for fast metadata scanning.
- Python: Add `pyraview.PyraviewDataset` class. Refactor into `pyraview` package.
- MATLAB: Add `+pyraview.Dataset` class. Refactor into `+pyraview` package.
- Workflow: Separate into `test.yml` (CI) and `release.yml` (CD).
- Workflow: Fix Windows zip failures using 7z.
- Workflow: Update MATLAB toolbox packaging to use native API with fixed GUID.
- Workflow: Fix MATLAB build flags and output names. Disable OpenMP on MacOS.
---
src/matlab/build_pyraview.m | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/matlab/build_pyraview.m b/src/matlab/build_pyraview.m
index 346526f..6a3ab15 100644
--- a/src/matlab/build_pyraview.m
+++ b/src/matlab/build_pyraview.m
@@ -13,8 +13,13 @@
if ispc
% Windows MSVC usually supports /openmp
omp_flags = {'COMPFLAGS="$COMPFLAGS /openmp"'};
+elseif ismac
+ % MacOS (Clang) usually requires libomp installed and -Xpreprocessor flags.
+ % For simplicity in CI, we disable OpenMP on Mac.
+ fprintf('MacOS detected: Disabling OpenMP.\n');
+ omp_flags = {};
else
- % GCC/Clang
+ % Linux (GCC)
% Pass as separate arguments to avoid quoting issues
omp_flags = {'CFLAGS="$CFLAGS -fopenmp"', 'LDFLAGS="$LDFLAGS -fopenmp"'};
end