diff --git a/.gitignore b/.gitignore index 62db579fc..a19f2e0d9 100644 --- a/.gitignore +++ b/.gitignore @@ -70,9 +70,6 @@ target/ # PyCharm .idea/ -# Specific files -imswitch/imcontrol/model/managers/lasers/KatanaLaserManager.py -imswitch/imcontrol/model/managers/stands/LeicaDMIManager.py # Mac Files .DS_Store diff --git a/docs/metadata_hub.md b/docs/metadata_hub.md new file mode 100644 index 000000000..43c6571b9 --- /dev/null +++ b/docs/metadata_hub.md @@ -0,0 +1,397 @@ +# Central Metadata Hub Documentation + +## Overview + +The Metadata Hub provides a centralized, structured metadata management system for ImSwitch, with OME-types integration for standards-compliant microscopy metadata. + +## Architecture + +``` +Controllers/Managers + ↓ + SharedAttributes (with typed values) + ↓ +SharedAttrsMetadataBridge (validates & normalizes) + ↓ + MetadataHub (central aggregator) + ├─→ Global metadata store + ├─→ Detector contexts + └─→ Frame event queues + ↓ +Recording/Writing + ├─→ HDF5 attributes + ├─→ OME-Zarr metadata + └─→ OME-TIFF metadata +``` + +## Core Components + +### 1. MetadataHub + +Central aggregator for all metadata: + +```python +from imswitch.imcontrol.model.metadata import MetadataHub + +# Access the hub (initialized in MasterController) +hub = master_controller.metadataHub + +# Update global metadata +hub.update(('Positioner', 'Stage', 'X', 'PositionUm'), 100.5, source='PositionerController') + +# Get latest metadata +global_snapshot = hub.snapshot_global() +detector_snapshot = hub.snapshot_detector('Camera1') + +# Register a detector +from imswitch.imcontrol.model.metadata import DetectorContext +context = DetectorContext( + name='Camera1', + shape_px=(2048, 2048), + pixel_size_um=6.5, + dtype='uint16' +) +hub.register_detector('Camera1', context) +``` + +### 2. DetectorContext + +Stores detector-specific metadata: + +```python +from imswitch.imcontrol.model.metadata import DetectorContext + +context = DetectorContext( + name='Camera1', + shape_px=(2048, 2048), + pixel_size_um=6.5, + dtype='uint16', + binning=1, + channel_name='GFP', + channel_color='00FF00', # Hex color + wavelength_nm=488, + exposure_ms=100, + gain=2.5, + objective_magnification=40.0, + objective_na=1.3 +) + +# Update fields +context.update(exposure_ms=200.0, gain=3.0) + +# Export for storage +context_dict = context.to_dict() + +# Generate OME Pixels object +pixels = context.to_ome_pixels(size_z=10, size_t=5, size_c=1) +``` + +### 3. Metadata Schema + +Standardized keys with units and types: + +```python +from imswitch.imcontrol.model.metadata import MetadataSchema, MetadataCategory + +# Standard categories +categories = [ + MetadataCategory.POSITIONER, + MetadataCategory.ILLUMINATION, + MetadataCategory.OBJECTIVE, + MetadataCategory.DETECTOR, + MetadataCategory.ENVIRONMENT, + MetadataCategory.SYSTEM +] + +# Make a standardized key +key = MetadataSchema.make_key( + category=MetadataCategory.POSITIONER, + device='Stage', + axis_or_sub='X', + field='PositionUm' +) +# Result: ('Positioner', 'Stage', 'X', 'PositionUm') + +# Normalize a value +normalized = MetadataSchema.normalize_value(key, 100.5, source='Controller') +# Returns SharedAttrValue with value=100.5, units='um', dtype='float' +``` + +### 4. 
Frame Events + +Per-frame metadata for acquisition alignment: + +```python +from imswitch.imcontrol.model.metadata import FrameEvent + +# During acquisition, push events +event = FrameEvent( + frame_number=42, + detector_name='Camera1', + stage_x_um=100.0, + stage_y_um=200.0, + stage_z_um=50.0, + exposure_ms=100.0, + laser_power_mw=10.0 +) +hub.push_frame_event('Camera1', event) + +# During writing, pop events +events = hub.pop_frame_events('Camera1', n=10) + +# Generate OME Plane objects +plane = event.to_ome_plane(the_z=0, the_c=0, the_t=42) +``` + +## Standardized Metadata Keys + +### Positioner Fields +- `PositionUm` (um, float): Position in micrometers +- `SpeedUmS` (um/s, float): Speed in micrometers per second +- `IsHomed` (bool): Whether axis is homed +- `IsMoving` (bool): Whether axis is moving +- `SetpointUm` (um, float): Target position + +### Illumination Fields +- `Enabled` (bool): Whether illumination is enabled +- `WavelengthNm` (nm, float): Wavelength in nanometers +- `PowerMw` (mW, float): Power in milliwatts +- `CurrentMa` (mA, float): Current in milliamps +- `Mode` (str): Operating mode +- `IntensityPercent` (%, float): Intensity as percentage + +### Objective Fields +- `Name` (str): Objective name +- `Magnification` (float): Magnification factor +- `NA` (float): Numerical aperture +- `Immersion` (str): Immersion medium +- `TurretIndex` (int): Turret position +- `WorkingDistanceUm` (um, float): Working distance + +### Detector Fields +- `ExposureMs` (ms, float): Exposure time +- `Gain` (float): Detector gain +- `Binning` (int): Binning factor +- `ROI` (tuple): Region of interest (x, y, w, h) +- `TemperatureC` (C, float): Detector temperature +- `PixelSizeUm` (um, float): Physical pixel size +- `ShapePx` (px, tuple): Detector shape (width, height) +- `BitDepth` (int): Bit depth +- `ReadoutMode` (str): Readout mode + +### Environment Fields +- `TemperatureC` (C, float): Temperature +- `HumidityPercent` (%, float): Relative humidity +- `CO2Percent` (%, float): CO2 concentration +- `PressurePa` (Pa, float): Pressure + +## Integration Points + +### Controllers/Managers + +Controllers should publish metadata using `setSharedAttr` with standardized keys: + +```python +from imswitch.imcontrol.model.metadata import MetadataSchema, MetadataCategory + +class MyPositionerController: + def updatePosition(self, positionerName, axis, position): + # Use standardized key + key = MetadataSchema.make_key( + MetadataCategory.POSITIONER, + positionerName, + axis, + 'PositionUm' + ) + self._commChannel.sharedAttrs[key] = position +``` + +The SharedAttrsMetadataBridge automatically forwards these to the MetadataHub. 
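+
+The forwarding itself is implemented in `SharedAttrsMetadataBridge`
+(`sharedattrs_bridge.py`). Conceptually it behaves like the following minimal
+sketch, which uses only the hub/schema calls documented above; the class and
+handler names here are illustrative assumptions, not the bridge's actual API:
+
+```python
+from imswitch.imcommon.model import SharedAttributes
+from imswitch.imcontrol.model.metadata import MetadataHub, MetadataSchema
+
+
+class MinimalAttrsBridge:
+    """Sketch only: forward SharedAttributes changes into a MetadataHub."""
+
+    def __init__(self, shared_attrs: SharedAttributes, hub: MetadataHub):
+        self._hub = hub
+        # Every attribute write emits sigAttributeSet(key, value)
+        shared_attrs.sigAttributeSet.connect(self._on_attribute_set)
+
+    def _on_attribute_set(self, key, value):
+        # Forward only keys that follow the standardized schema
+        if not MetadataSchema.validate_key(key):
+            return
+        # Attach units/dtype/source, then hand the value to the hub
+        normalized = MetadataSchema.normalize_value(key, value, source='SharedAttributes')
+        self._hub.update(key, normalized.value, source=normalized.source)
+```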
+ +### Recording + +RecordingController automatically enriches attrs with hub metadata: + +```python +# Happens automatically in RecordingController._get_detector_attrs() +attrs = { + 'Positioner:Stage:X:PositionUm': 100.5, + 'Detector:Camera1:ExposureMs': 100.0, + 'Camera1:pixel_size_um': 6.5, + 'Camera1:shape_px': '[2048, 2048]', + 'Camera1:fov_um': '[13312.0, 13312.0]', + '_metadata_hub_global': '{"Positioner": {...}, "Detector": {...}}' +} +``` + +### OME Writers + +Future OME writers can consume hub metadata: + +```python +# Get OME object for all detectors +ome = hub.to_ome(detector_names=['Camera1', 'Camera2']) + +# Or per-detector +context = hub.get_detector('Camera1') +pixels = context.to_ome_pixels(size_z=10, size_t=100, size_c=1) + +# With frame events +events = hub.peek_frame_events('Camera1', n=100) +planes = [event.to_ome_plane(the_t=i) for i, event in enumerate(events)] +``` + +## Usage Examples + +### Example 1: Publishing Positioner Metadata + +```python +class MyPositionerManager: + def __init__(self): + self.position = {'X': 0.0, 'Y': 0.0, 'Z': 0.0} + + def move(self, axis, distance): + self.position[axis] += distance + + # Publish position update + key = ('Positioner', 'MyStage', axis, 'PositionUm') + commChannel.sharedAttrs[key] = self.position[axis] +``` + +### Example 2: Registering a Detector + +```python +# In MasterController or detector manager init +from imswitch.imcontrol.model.metadata import DetectorContext + +context = DetectorContext( + name='Camera1', + shape_px=(2048, 2048), + pixel_size_um=6.5, + dtype='uint16', + exposure_ms=100.0, + gain=1.0, + binning=1 +) + +master_controller.metadataHub.register_detector('Camera1', context) +``` + +### Example 3: Pushing Frame Events + +```python +# In acquisition loop +for frame_idx in range(num_frames): + # Capture frame + image = detector.getLatestFrame() + + # Get current positions + stage_x = positioner.position['X'] + stage_y = positioner.position['Y'] + + # Push frame event + hub.push_frame_event( + 'Camera1', + stage_x_um=stage_x, + stage_y_um=stage_y, + exposure_ms=current_exposure + ) +``` + +### Example 4: Consuming Metadata in Writers + +```python +# In storer write method +def write_chunk(self, detector_name, chunk): + # Get metadata snapshot + detector_snapshot = hub.snapshot_detector(detector_name) + + # Get detector context + context = detector_snapshot['detector_context'] + pixel_size = context['pixel_size_um'] + + # Get frame events + events = hub.pop_frame_events(detector_name, n=len(chunk)) + + # Write with metadata + for i, (frame, event) in enumerate(zip(chunk, events)): + self.write_frame( + frame, + pixel_size=pixel_size, + stage_x=event.stage_x_um, + stage_y=event.stage_y_um + ) +``` + +## Best Practices + +1. **Use Standardized Keys**: Always use `MetadataSchema.make_key()` to create keys +2. **Publish on Change**: Update metadata when hardware state changes +3. **Register Detectors Early**: Register detectors during initialization +4. **Push Frame Events**: Push events at trigger time, not after acquisition +5. **Pop Events on Write**: Pop exactly the number of frames being written +6. 
**Clear Events**: Clear event queues between recordings + +## Migration from Legacy Code + +### Old Code (Legacy) +```python +# Old way - no units, no types +self._commChannel.sharedAttrs[('Position', 'Stage', 'X')] = 100.5 +``` + +### New Code (Schema-Based) +```python +# New way - with schema +key = MetadataSchema.make_key( + MetadataCategory.POSITIONER, + 'Stage', + 'X', + 'PositionUm' # Field name indicates units +) +self._commChannel.sharedAttrs[key] = 100.5 +``` + +## Troubleshooting + +### Metadata Not Appearing in Files +- Check that MetadataHub is initialized in MasterController +- Verify SharedAttrsMetadataBridge is connected +- Ensure keys follow standardized format +- Check RecordingController is using `_get_detector_attrs()` + +### Frame Event Count Mismatch +- Ensure push_frame_event() is called exactly once per frame +- Verify pop_frame_events() pops correct number +- Clear events between recordings with `clear_frame_events()` + +### Missing Detector Metadata +- Verify detector is registered with hub +- Check detector context has required fields +- Ensure registration happens after detectorsManager init + +## API Reference + +See inline documentation in: +- `imswitch/imcontrol/model/metadata/metadata_hub.py` +- `imswitch/imcontrol/model/metadata/schema.py` +- `imswitch/imcontrol/model/metadata/sharedattrs_bridge.py` + +## OME-types Integration + +The hub provides direct OME-types object generation: + +```python +# Generate complete OME metadata +ome = hub.to_ome(detector_names=['Camera1']) + +# Export to OME-XML +xml_string = ome.to_xml() + +# Use with OME-Zarr/OME-TIFF writers +# (Integration in progress) +``` + +--- + +*For questions or issues, consult the ImSwitch documentation or file an issue on GitHub.* diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 00cb30206..acfcc6a24 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -45,7 +45,6 @@ import StageCenterCalibrationWizard from "./components/StageCenterCalibrationWiz import { NavigationDrawer, TopBar } from "./components/navigation"; import AppManagerPage from "./components/AppManagerPage.jsx"; -import { MCTProvider } from "./context/MCTContext.js"; //axon import AxonTabComponent from "./axon/AxonTabComponent.js"; @@ -440,11 +439,6 @@ function App() { )} {selectedPlugin === "Infinity Scanning" && } {selectedPlugin === "Blockly" && } - {selectedPlugin === "Timelapse" && ( - - - - )} {selectedPlugin === "Objective" && } {selectedPlugin === "About" && } {selectedPlugin === "SystemSettings" && } diff --git a/frontend/src/components/ConfigurationWizard.js b/frontend/src/components/ConfigurationWizard.js index b33a4cc27..93d536e12 100644 --- a/frontend/src/components/ConfigurationWizard.js +++ b/frontend/src/components/ConfigurationWizard.js @@ -247,7 +247,6 @@ const ConfigurationWizard = ({ open, onClose, hostIP, hostPort }) => { "sim": null, "dpc": null, "objective": null, - "mct": null, "nidaq": { "timerCounterChannel": null, "startTrigger": false diff --git a/frontend/src/components/MCTController.js b/frontend/src/components/MCTController.js deleted file mode 100644 index a578143ed..000000000 --- a/frontend/src/components/MCTController.js +++ /dev/null @@ -1,349 +0,0 @@ -import React, { useContext, useState, useEffect } from "react"; -import { useDispatch, useSelector } from "react-redux"; -import { MCTContext } from "../context/MCTContext"; -import { - Paper, - Grid, - TextField, - Checkbox, - FormControlLabel, - Slider, - Button, - Typography, -} from "@mui/material"; -import * as widgetSlice 
from "../state/slices/WidgetSlice.js"; -//import { useWebSocket } from "../context/useWebSocket"; - - -const MCTController = ({ hostIP, hostPort }) => { - const [numImagesTaken, setNumImagesTaken] = useState(0); - const [folderPath, setFolderPath] = useState(""); - //const socket = useWebSocket(); - const { - timePeriod, - setTimePeriod, - numMeasurements, - setNumMeasurements, - zMin, - setZMin, - zMax, - setZMax, - zSteps, - setZSteps, - zStackEnabled, - setZStackEnabled, - xMin, - setXMin, - xMax, - setXMax, - xSteps, - setXSteps, - xStackEnabled, - setXStackEnabled, - yMin, - setYMin, - yMax, - setYMax, - ySteps, - setYSteps, - yStackEnabled, - setYStackEnabled, - intensityLaser1, - setIntensityLaser1, - intensityLaser2, - setIntensityLaser2, - intensityLED, - setIntensityLED, - fileName, - setFileName, - isRunning, - setIsRunning, - } = useContext(MCTContext); - - // Redux dispatcher and state - const dispatch = useDispatch(); - const widgetState = useSelector(widgetSlice.getWidgetState); - - /* - useEffect(() => { - if (socket) { - socket.onmessage = (event) => { - console.log("Message received:", event.data); - - }; - } - return () => { - if (socket) { - socket.onmessage = null; - } - }; - }, [socket]); - - */ - useEffect(() => { - const fetchMCTStatus = () => { - const url = `${hostIP}:${hostPort}/imswitch/api/MCTController/getMCTStatus`; - - - fetch(url) - .then((response) => response.json()) - .then((data) => { - console.log(data); - // Set default values from the response - // TODO: Should we fetch this on the first render? - - setTimePeriod(data.timePeriod); - setZStackEnabled(data.zStackEnabled); - setZMin(data.zStackMin); - setZMax(data.zStackMax); - setZSteps(data.zStackStep); - setXStackEnabled(data.xyScanEnabled); - setXMin(data.xScanMin); - setXMax(data.xScanMax); - setXSteps(data.xScanStep); - setYMin(data.yScanMin); - setYMax(data.yScanMax); - setYSteps(data.yScanStep); - setIntensityLaser1(data.Illu1Value); - setIntensityLaser2(data.Illu2Value); - setIntensityLED(data.Illu3Value); - - // enable/disable start/stop - setNumImagesTaken(data.nImagesTaken); - setIsRunning(data.isMCTrunning); - setFolderPath(data.MCTFilename); - }) - .catch((error) => { - //console.error("Error fetching MCT status:", error); - }); - }; - - fetchMCTStatus(); - - }, [hostIP, hostPort]); - - - - const handleStart = () => { - const url = - `${hostIP}:${hostPort}/imswitch/api/MCTController/startTimelapseImaging?` + - `tperiod=${timePeriod}&nImagesToCapture=${numMeasurements}&MCTFilename=${fileName}&` + - `zStackEnabled=${zStackEnabled}&zStackMin=${zMin}&zStackMax=${zMax}&zStackStep=${zSteps}&` + - `xyScanEnabled=${xStackEnabled}&xScanMin=${xMin}&xScanMax=${xMax}&xScanStep=${xSteps}&` + - `yScanMin=${yMin}&yScanMax=${yMax}&yScanStep=${ySteps}&` + - `IlluValue1=${intensityLaser1}&IlluValue2=${intensityLaser2}&IlluValue3=${intensityLED}`; - - fetch(url, { method: "GET" }) - .then((response) => response.json()) - .then((data) => { - console.log(data); - setIsRunning(true); - }) - .catch((error) => console.error("Error:", error)); - }; - - const handleStop = () => { - const url = `${hostIP}:${hostPort}/imswitch/api/MCTController/stopTimelapseImaging`; - fetch(url, { method: "GET" }) - .then((response) => response.json()) - .then((data) => { - console.log(data); - setIsRunning(false); - }) - .catch((error) => console.error("Error:", error)); - }; - - return ( - - - - setTimePeriod(e.target.value)} - fullWidth - /> - - - setNumMeasurements(e.target.value)} - fullWidth - /> - - {/* Z-Stack, X-Stack, 
Y-Stack UI */} - - setZMin(e.target.value)} - fullWidth - /> - - - setZMax(e.target.value)} - fullWidth - /> - - - setZSteps(e.target.value)} - fullWidth - /> - - - } - checked={zStackEnabled} - onChange={(e) => setZStackEnabled(e.target.checked)} - label="Z-Stack Enabled" - /> - - {/* XY Scan and Y Scan */} - - setXMin(e.target.value)} - fullWidth - /> - - - setXMax(e.target.value)} - fullWidth - /> - - - setXSteps(e.target.value)} - fullWidth - /> - - - } - checked={xStackEnabled} - onChange={(e) => setXStackEnabled(e.target.checked)} - label="XY Scan Enabled" - /> - - - setYMin(e.target.value)} - fullWidth - /> - - - setYMax(e.target.value)} - fullWidth - /> - - - setYSteps(e.target.value)} - fullWidth - /> - - - } - checked={yStackEnabled} - onChange={(e) => setYStackEnabled(e.target.checked)} - label="Y-Stack Enabled" - /> - - {/* Intensity Controls */} - - Intensity (Laser 1): {intensityLaser1} - dispatch(widgetSlice.setSliderValue(value))} - max={32767} - step={1} - /> - - - - Intensity (Laser 2): {widgetState.generic["slider2"] || 0} - - dispatch(widgetSlice.updateGeneric({ key: "slider2", value }))} - max={32767} - step={1} - /> - - - - Intensity (LED): {widgetState.generic["intensity"] || 0} - - - dispatch(widgetSlice.updateGeneric({ key: "intensity", value })) - } - max={255} - step={1} - /> - - - setFileName(e.target.value)} - fullWidth - /> - - - - {`Images taken: ${numImagesTaken}`} - - - - - {`Folder: ${folderPath}`} - - - - - - - - - ); -}; - -export default MCTController; diff --git a/frontend/src/context/MCTContext.js b/frontend/src/context/MCTContext.js deleted file mode 100644 index f32b90637..000000000 --- a/frontend/src/context/MCTContext.js +++ /dev/null @@ -1,75 +0,0 @@ -// src/context/MCTContext.js -import React, { createContext, useState } from 'react'; - -// Create the context -export const MCTContext = createContext(); - -// Create the provider component -export const MCTProvider = ({ children }) => { - const [timePeriod, setTimePeriod] = useState('5'); - const [numMeasurements, setNumMeasurements] = useState('1'); - const [zMin, setZMin] = useState('-100'); - const [zMax, setZMax] = useState('100'); - const [zSteps, setZSteps] = useState('0'); - const [zStackEnabled, setZStackEnabled] = useState(false); - const [xMin, setXMin] = useState('-1000'); - const [xMax, setXMax] = useState('1000'); - const [xSteps, setXSteps] = useState('0'); - const [xStackEnabled, setXStackEnabled] = useState(false); - const [yMin, setYMin] = useState('-1000'); - const [yMax, setYMax] = useState('1000'); - const [ySteps, setYSteps] = useState('0'); - const [yStackEnabled, setYStackEnabled] = useState(false); - const [intensityLaser1, setIntensityLaser1] = useState(0); - const [intensityLaser2, setIntensityLaser2] = useState(0); - const [intensityLED, setIntensityLED] = useState(0); - const [fileName, setFileName] = useState('MCT'); - const [isRunning, setIsRunning] = useState(false); - - return ( - - {children} - - ); -}; diff --git a/imswitch/imcommon/model/SharedAttributes.py b/imswitch/imcommon/model/SharedAttributes.py index 6a4700d0d..9f2232fa2 100644 --- a/imswitch/imcommon/model/SharedAttributes.py +++ b/imswitch/imcommon/model/SharedAttributes.py @@ -1,7 +1,33 @@ import json +import time +import numpy as np from imswitch.imcommon.framework import Signal, SignalInterface +# Lazy import to avoid circular import issues +# The import chain imcommon.model -> SharedAttributes -> imcontrol.model.metadata +# -> imcontrol.model.managers -> initLogger -> imcommon.model creates a cycle 
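+# Callers must therefore resolve the class lazily via
+# _get_shared_attr_value_class() / _is_shared_attr_value() below rather than
+# importing SharedAttrValue at module import time.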
+HAS_METADATA_HUB = False +SharedAttrValue = None + +def _get_shared_attr_value_class(): + """Lazy import of SharedAttrValue to avoid circular imports.""" + global HAS_METADATA_HUB, SharedAttrValue + if SharedAttrValue is None: + try: + from imswitch.imcontrol.model.metadata.schema import SharedAttrValue as _SharedAttrValue + SharedAttrValue = _SharedAttrValue + HAS_METADATA_HUB = True + except ImportError: + HAS_METADATA_HUB = False + return SharedAttrValue + + +def _is_shared_attr_value(value): + """Check if value is a SharedAttrValue instance (with lazy import).""" + cls = _get_shared_attr_value_class() + return cls is not None and isinstance(value, cls) + class SharedAttributes(SignalInterface): sigAttributeSet = Signal(object, object) # (key, value) @@ -10,17 +36,38 @@ def __init__(self): super().__init__() self._data = {} - def getHDF5Attributes(self): - """ Returns a dictionary of HDF5 attributes representing this object. + def getSharedAttributes(self): + """ + Returns a dictionary of HDF5 attributes representing this object. + + If values are SharedAttrValue objects, extracts the actual value. + Also includes metadata as separate keys if available. """ attrs = {} for key, value in self._data.items(): - attrs[':'.join(key)] = value + key_str = ':'.join(key) + + # Check if value is a SharedAttrValue + if _is_shared_attr_value(value): + attrs[key_str] = value.value + # Add metadata as separate keys + if value.units: + attrs[f"{key_str}:units"] = value.units + if value.timestamp: + attrs[f"{key_str}:timestamp"] = value.timestamp + if value.source: + attrs[f"{key_str}:source"] = value.source + else: + attrs[key_str] = value return attrs def getJSON(self): - """ Returns a JSON representation of this instance. """ + """ + Returns a JSON representation of this instance. + + If values are SharedAttrValue objects, includes full metadata. + """ attrs = {} for key, value in self._data.items(): parent = attrs @@ -29,9 +76,31 @@ def getJSON(self): parent[key[i]] = {} parent = parent[key[i]] - parent[key[-1]] = value - - return json.dumps(attrs) + # Check if value is a SharedAttrValue + if _is_shared_attr_value(value): + parent[key[-1]] = { + 'value': value.value, + 'timestamp': value.timestamp, + 'units': value.units, + 'dtype': value.dtype, + 'source': value.source, + 'valid': value.valid, + } + else: + parent[key[-1]] = value + + # Custom serializer for numpy types and other special objects + def json_serializer(obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, (np.integer, np.floating)): + return obj.item() + elif hasattr(obj, '__dict__'): + return str(obj) + else: + return str(obj) + + return json.dumps(attrs, default=json_serializer) def update(self, data): """ Updates this object with the data in the given dictionary or @@ -44,12 +113,31 @@ def update(self, data): def __getitem__(self, key): self._validateKey(key) - return self._data[key] + value = self._data[key] + # For backwards compatibility, return raw value if it's a SharedAttrValue + if _is_shared_attr_value(value): + return value.value + return value + + def get_typed(self, key): + """ + Get the full typed value (SharedAttrValue) if available. 
+ + Returns: + SharedAttrValue if available, otherwise raw value + """ + self._validateKey(key) + return self._data.get(key) def __setitem__(self, key, value): self._validateKey(key) + # Store the value as-is (can be raw value or SharedAttrValue) self._data[key] = value - self.sigAttributeSet.emit(key, value) + # For signal emission, unwrap SharedAttrValue to maintain backwards compatibility + if _is_shared_attr_value(value): + self.sigAttributeSet.emit(key, value.value) + else: + self.sigAttributeSet.emit(key, value) def __iter__(self): yield from self._data.items() diff --git a/imswitch/imcommon/view/guitools/__init__.py b/imswitch/imcommon/view/guitools/__init__.py index 29682c0d7..615f3f4d7 100644 --- a/imswitch/imcommon/view/guitools/__init__.py +++ b/imswitch/imcommon/view/guitools/__init__.py @@ -2,7 +2,6 @@ if not IS_HEADLESS: # FIXME: hacky way to do that I guess.. from .BetterPushButton import BetterPushButton - from .joystick import Joystick from .BetterSlider import BetterSlider from .CheckableComboBox import CheckableComboBox from .FloatSlider import FloatSlider diff --git a/imswitch/imcommon/view/guitools/joystick.py b/imswitch/imcommon/view/guitools/joystick.py deleted file mode 100644 index cd1190418..000000000 --- a/imswitch/imcommon/view/guitools/joystick.py +++ /dev/null @@ -1,169 +0,0 @@ -import math - -from PyQt5.QtWidgets import QWidget -from PyQt5.QtWidgets import QLabel -from PyQt5.QtGui import QPainter, QBrush, QPen -from PyQt5.QtCore import Qt -from qtpy import QtCore - -class Joystick(QWidget): - floatValueChanged = QtCore.Signal(float) - ''' based on https://github.com/bsiyoung/PyQt5-Joystick/ ''' - - def __init__(self, window_min_size = [200, 200], callbackFct=None): - super().__init__() - - self.window_title = 'Joystick' - self.window_min_size = window_min_size - self.wnd_fit_size = 400 - self.window_size = [self.wnd_fit_size, self.wnd_fit_size] - - self.circle_margin_ratio = 0.1 - self.circle_diameter = int(self.window_size[0] * (1 - self.circle_margin_ratio * 2)) - - self.stick_diameter_ratio = 0.1 - self.stick_diameter = int(self.circle_diameter * self.stick_diameter_ratio) - self.is_mouse_down = False - self.stick_pos = [0, 0] - self.strength = 0 - - self.stat_label_margin = 10 - self.stat_label = QLabel(self) - - self.callbackFct = callbackFct - - self.init_ui() - - def init_ui(self): - self.setWindowTitle(self.window_title) - - self.setMinimumSize(self.window_min_size[0], self.window_min_size[1]) - self.resize(self.window_size[0], self.window_size[1]) - - self.stat_label.setAlignment(Qt.AlignLeft) - self.stat_label.setGeometry(self.stat_label_margin, self.stat_label_margin, - self.window_min_size[0] - self.stat_label_margin * 2, - self.window_min_size[0] - self.stat_label_margin * 2) - font = self.stat_label.font() - font.setPointSize(10) - - self.setMouseTracking(True) - - self.show() - - def resizeEvent(self, event): - self.wnd_fit_size = min(self.width(), self.height()) - - self.circle_diameter = int(self.wnd_fit_size * (1 - self.circle_margin_ratio * 2)) - self.stick_diameter = int(self.circle_diameter * self.stick_diameter_ratio) - - def _draw_outer_circle(self, painter): - painter.setPen(QPen(Qt.black, 2, Qt.SolidLine)) - - circle_margin = int(self.wnd_fit_size * self.circle_margin_ratio) - painter.drawEllipse(circle_margin, circle_margin, - self.circle_diameter, self.circle_diameter) - - def _draw_sub_lines(self, painter): - painter.setRenderHint(QPainter.Antialiasing) - painter.setPen(QPen(Qt.lightGray, 1, Qt.DashLine)) - - num_sub_line = 6 - for 
i in range(num_sub_line): - theta = math.pi / 2 - math.pi * i / num_sub_line - x0 = int(self.wnd_fit_size / 2 - self.circle_diameter / 2 * math.cos(theta)) - y0 = int(self.wnd_fit_size / 2 - self.circle_diameter / 2 * math.sin(theta)) - x1 = int(self.wnd_fit_size / 2 - self.circle_diameter / 2 * math.cos(theta + math.pi)) - y1 = int(self.wnd_fit_size / 2 - self.circle_diameter / 2 * math.sin(theta + math.pi)) - painter.drawLine(x0, y0, x1, y1) - - def _draw_sub_circles(self, painter): - painter.setPen(QPen(Qt.lightGray, 1, Qt.DashLine)) - - num_sub_circle = 4 - for i in range(num_sub_circle): - sub_radius = int(self.circle_diameter / 2 * (i + 1) / (num_sub_circle + 1)) - sub_margin = int(self.wnd_fit_size / 2 - sub_radius) - painter.drawEllipse(sub_margin, sub_margin, sub_radius * 2, sub_radius * 2) - - # Draw Inner(Joystick) Circle - painter.setBrush(QBrush(Qt.black, Qt.SolidPattern)) - stick_margin = [int(self.wnd_fit_size / 2 + self.stick_pos[0] - self.stick_diameter / 2), - int(self.wnd_fit_size / 2 - self.stick_pos[1] - self.stick_diameter / 2)] - painter.drawEllipse(stick_margin[0], stick_margin[1], self.stick_diameter, self.stick_diameter) - - def paintEvent(self, event): - painter = QPainter(self) - - # Draw Outer(Main) Circle - self._draw_outer_circle(painter) - - # Draw Sub Lines - self._draw_sub_lines(painter) - - # Draw Sub Circles - self._draw_sub_circles(painter) - - # Change Status Label Text (Angle In Degree) - strength = self.get_strength() - angle = self.get_angle(in_deg=True) - if angle < 0: - angle += 360 - #self.stat_label.setText('Strength : {:.2f} \nDirection : {:.2f}°'.format(strength, angle)) - - def mouseMoveEvent(self, event): - # Move Stick Only When Mouse Left Button Pressed - if self.is_mouse_down is False: - return - - # Window Coordinate To Cartesian Coordinate - pos = event.pos() - stick_pos_buf = [pos.x() - self.wnd_fit_size / 2, self.wnd_fit_size / 2 - pos.y()] - - # If Cursor Is Not In Available Range, Correct It - if self._get_strength(stick_pos_buf) > 1.0: - theta = math.atan2(stick_pos_buf[1], stick_pos_buf[0]) - radius = (self.circle_diameter - self.stick_diameter) / 2 - stick_pos_buf[0] = radius * math.cos(theta) - stick_pos_buf[1] = radius * math.sin(theta) - - # Emit signal #TODO: Not sure if this is the right way to do it - if self.callbackFct is not None: - self.callbackFct(stick_pos_buf[0], stick_pos_buf[1]) - - self.stick_pos = stick_pos_buf - self.repaint() - - def mousePressEvent(self, event): - if event.button() == Qt.LeftButton: - self.is_mouse_down = True - - def mouseReleaseEvent(self, event): - if event.button() == Qt.LeftButton: - self.is_mouse_down = False - self.stick_pos = [0, 0] - self.repaint() - if self.callbackFct is not None: - self.callbackFct(0,0) - - - # Get Strength With Argument - def _get_strength(self, stick_pos): - max_distance = (self.circle_diameter - self.stick_diameter) / 2 - distance = math.sqrt(stick_pos[0] * stick_pos[0] + stick_pos[1] * stick_pos[1]) - - return distance / max_distance - - # Get Strength With Current Stick Position - def get_strength(self): - max_distance = (self.circle_diameter - self.stick_diameter) / 2 - distance = math.sqrt(self.stick_pos[0] * self.stick_pos[0] + self.stick_pos[1] * self.stick_pos[1]) - - return distance / max_distance - - def get_angle(self, in_deg=False): - angle = math.atan2(self.stick_pos[1], self.stick_pos[0]) - if in_deg is True: - angle = angle * 180 / math.pi - - return angle diff --git a/imswitch/imcontrol/_test/__init__.py b/imswitch/imcontrol/_test/__init__.py 
index bf57b8371..ff9309ec1 100644 --- a/imswitch/imcontrol/_test/__init__.py +++ b/imswitch/imcontrol/_test/__init__.py @@ -127,7 +127,6 @@ "Laser", "Positioner", "Autofocus", - "MCT", "ROIScan", "HistoScan", "Hypha" @@ -268,7 +267,6 @@ "Laser", "Positioner", "Autofocus", - "MCT", "ROIScan", "HistoScan", "Hypha", diff --git a/imswitch/imcontrol/_test/unit/test_metadata_hub.py b/imswitch/imcontrol/_test/unit/test_metadata_hub.py new file mode 100644 index 000000000..5f68b3d21 --- /dev/null +++ b/imswitch/imcontrol/_test/unit/test_metadata_hub.py @@ -0,0 +1,386 @@ +""" +Unit tests for MetadataHub and related metadata infrastructure. + +Tests the MetadataHub, DetectorContext, FrameEvent, and schema functionality. +""" +import time +import numpy as np +from typing import Tuple + + +def test_metadata_schema_basic(): + """Test basic MetadataSchema functionality.""" + from imswitch.imcontrol.model.metadata import MetadataSchema, MetadataCategory + + # Test key validation + valid_key = ('Positioner', 'Stage', 'X', 'PositionUm') + assert MetadataSchema.validate_key(valid_key) is True + + invalid_key = ('InvalidCategory', 'Device') + assert MetadataSchema.validate_key(invalid_key) is False + + # Test field info retrieval + field_info = MetadataSchema.get_field_info('Positioner', 'PositionUm') + assert field_info is not None + units, dtype, description = field_info + assert units == 'um' + assert dtype == 'float' + + +def test_shared_attr_value(): + """Test SharedAttrValue wrapper.""" + from imswitch.imcontrol.model.metadata import SharedAttrValue + + # Create a value with timestamp + timestamp = time.time() + attr_value = SharedAttrValue( + value=123.45, + timestamp=timestamp, + units='um', + dtype='float', + source='TestController' + ) + + assert attr_value.value == 123.45 + assert attr_value.units == 'um' + assert attr_value.timestamp == timestamp + assert attr_value.valid is True + + +def test_metadata_schema_normalize(): + """Test value normalization with schema.""" + from imswitch.imcontrol.model.metadata import MetadataSchema + + key = ('Positioner', 'Stage', 'X', 'PositionUm') + value = 100.5 + + normalized = MetadataSchema.normalize_value(key, value, source='Test') + + assert normalized.value == 100.5 + assert normalized.units == 'um' + assert normalized.dtype == 'float' + assert normalized.source == 'Test' + + +def test_detector_context_basic(): + """Test DetectorContext creation.""" + from imswitch.imcontrol.model.metadata import DetectorContext + + context = DetectorContext( + name='TestCamera', + shape_px=(1024, 1024), + pixel_size_um=6.5, + dtype='uint16' + ) + + assert context.name == 'TestCamera' + assert context.shape_px == (1024, 1024) + assert context.pixel_size_um == 6.5 + assert context.dtype == 'uint16' + + # FOV should be calculated automatically + expected_fov = (1024 * 6.5, 1024 * 6.5) + assert context.fov_um == expected_fov + + +def test_detector_context_update(): + """Test DetectorContext updates.""" + from imswitch.imcontrol.model.metadata import DetectorContext + + context = DetectorContext( + name='TestCamera', + shape_px=(512, 512), + pixel_size_um=1.0, + ) + + initial_time = context.last_update + time.sleep(0.01) + + context.update(exposure_ms=100.0, gain=2.5) + + assert context.exposure_ms == 100.0 + assert context.gain == 2.5 + assert context.last_update > initial_time + + +def test_detector_context_to_dict(): + """Test DetectorContext serialization.""" + from imswitch.imcontrol.model.metadata import DetectorContext + + context = DetectorContext( + name='TestCamera', 
+ shape_px=(1024, 768), + pixel_size_um=5.0, + exposure_ms=50.0, + gain=1.5 + ) + + context_dict = context.to_dict() + + assert context_dict['name'] == 'TestCamera' + assert context_dict['shape_px'] == (1024, 768) + assert context_dict['pixel_size_um'] == 5.0 + assert context_dict['exposure_ms'] == 50.0 + assert context_dict['gain'] == 1.5 + + +def test_frame_event_basic(): + """Test FrameEvent creation.""" + from imswitch.imcontrol.model.metadata import FrameEvent + + event = FrameEvent( + frame_number=42, + detector_name='Camera1', + stage_x_um=100.0, + stage_y_um=200.0, + stage_z_um=50.0, + exposure_ms=100.0 + ) + + assert event.frame_number == 42 + assert event.detector_name == 'Camera1' + assert event.stage_x_um == 100.0 + assert event.stage_y_um == 200.0 + assert event.stage_z_um == 50.0 + assert event.exposure_ms == 100.0 + + +def test_frame_event_to_dict(): + """Test FrameEvent serialization.""" + from imswitch.imcontrol.model.metadata import FrameEvent + + event = FrameEvent( + frame_number=10, + stage_x_um=1.0, + stage_y_um=2.0, + metadata={'extra': 'data'} + ) + + event_dict = event.to_dict() + + assert event_dict['frame_number'] == 10 + assert event_dict['stage_x_um'] == 1.0 + assert event_dict['metadata']['extra'] == 'data' + + +def test_metadata_hub_creation(): + """Test MetadataHub initialization.""" + from imswitch.imcontrol.model.metadata import MetadataHub + + hub = MetadataHub() + assert hub is not None + + +def test_metadata_hub_update(): + """Test MetadataHub update functionality.""" + from imswitch.imcontrol.model.metadata import MetadataHub + + hub = MetadataHub() + + key = ('Positioner', 'Stage', 'X', 'PositionUm') + hub.update(key, 123.45, source='TestController') + + value = hub.get(key) + assert value is not None + assert value.value == 123.45 + assert value.units == 'um' + + +def test_metadata_hub_detector_registration(): + """Test detector registration with hub.""" + from imswitch.imcontrol.model.metadata import MetadataHub, DetectorContext + + hub = MetadataHub() + + context = DetectorContext( + name='Camera1', + shape_px=(1024, 1024), + pixel_size_um=6.5 + ) + + hub.register_detector('Camera1', context) + + retrieved = hub.get_detector('Camera1') + assert retrieved is not None + assert retrieved.name == 'Camera1' + assert retrieved.shape_px == (1024, 1024) + + +def test_metadata_hub_detector_update(): + """Test updating detector context via hub.""" + from imswitch.imcontrol.model.metadata import MetadataHub, DetectorContext + + hub = MetadataHub() + + context = DetectorContext( + name='Camera1', + shape_px=(512, 512), + pixel_size_um=1.0 + ) + + hub.register_detector('Camera1', context) + hub.update_detector('Camera1', exposure_ms=200.0, gain=3.0) + + retrieved = hub.get_detector('Camera1') + assert retrieved.exposure_ms == 200.0 + assert retrieved.gain == 3.0 + + +def test_metadata_hub_snapshot(): + """Test metadata snapshot functionality.""" + from imswitch.imcontrol.model.metadata import MetadataHub + + hub = MetadataHub() + + # Add some metadata + hub.update(('Positioner', 'Stage', 'X', 'PositionUm'), 100.0) + hub.update(('Positioner', 'Stage', 'Y', 'PositionUm'), 200.0) + hub.update(('Detector', 'Camera1', '', 'ExposureMs'), 50.0) + + snapshot = hub.snapshot_global() + + assert 'Positioner' in snapshot + assert 'Detector' in snapshot + + +def test_metadata_hub_frame_events(): + """Test frame event queue functionality.""" + from imswitch.imcontrol.model.metadata import MetadataHub, FrameEvent + + hub = MetadataHub() + + # Push frame events + event1 = 
FrameEvent(frame_number=0, stage_x_um=0.0, stage_y_um=0.0) + event2 = FrameEvent(frame_number=1, stage_x_um=10.0, stage_y_um=20.0) + + hub.push_frame_event('Camera1', event1) + hub.push_frame_event('Camera1', event2) + + # Peek without removing + events = hub.peek_frame_events('Camera1', n=2) + assert len(events) == 2 + assert events[0].frame_number == 0 + assert events[1].frame_number == 1 + + # Pop events + popped = hub.pop_frame_events('Camera1', n=2) + assert len(popped) == 2 + assert popped[0].stage_x_um == 0.0 + assert popped[1].stage_x_um == 10.0 + + # Queue should be empty now + remaining = hub.peek_frame_events('Camera1') + assert len(remaining) == 0 + + +def test_metadata_hub_frame_events_auto_increment(): + """Test automatic frame number increment.""" + from imswitch.imcontrol.model.metadata import MetadataHub + + hub = MetadataHub() + + # Push events without frame numbers + hub.push_frame_event('Camera1', stage_x_um=1.0) + hub.push_frame_event('Camera1', stage_x_um=2.0) + hub.push_frame_event('Camera1', stage_x_um=3.0) + + events = hub.peek_frame_events('Camera1') + + assert len(events) == 3 + assert events[0].frame_number == 0 + assert events[1].frame_number == 1 + assert events[2].frame_number == 2 + + +def test_shared_attributes_with_typed_values(): + """Test SharedAttributes with SharedAttrValue integration.""" + from imswitch.imcommon.model import SharedAttributes + try: + from imswitch.imcontrol.model.metadata import SharedAttrValue + has_metadata = True + except ImportError: + has_metadata = False + + if not has_metadata: + # Skip test if metadata module not available + return + + shared_attrs = SharedAttributes() + + # Set a raw value + key1 = ('Test', 'Device', 'Property') + shared_attrs[key1] = 123.45 + + # Should return raw value + assert shared_attrs[key1] == 123.45 + + # Set a SharedAttrValue + key2 = ('Test', 'Device2', 'Property2') + typed_value = SharedAttrValue( + value=678.9, + units='um', + source='TestSource' + ) + shared_attrs[key2] = typed_value + + # __getitem__ should return unwrapped value + assert shared_attrs[key2] == 678.9 + + # get_typed should return full SharedAttrValue + full_value = shared_attrs.get_typed(key2) + assert isinstance(full_value, SharedAttrValue) + assert full_value.value == 678.9 + assert full_value.units == 'um' + + +def test_shared_attributes_hdf5_with_metadata(): + """Test HDF5 export with metadata.""" + from imswitch.imcommon.model import SharedAttributes + try: + from imswitch.imcontrol.model.metadata import SharedAttrValue + has_metadata = True + except ImportError: + has_metadata = False + + if not has_metadata: + return + + shared_attrs = SharedAttributes() + + # Add typed value + key = ('Positioner', 'Stage', 'X', 'PositionUm') + typed_value = SharedAttrValue( + value=100.5, + units='um', + timestamp=time.time(), + source='Controller' + ) + shared_attrs[key] = typed_value + + # Export to HDF5 format + hdf5_attrs = shared_attrs.getSharedAttributes() + + # Should have main value and metadata keys + key_str = 'Positioner:Stage:X:PositionUm' + assert key_str in hdf5_attrs + assert hdf5_attrs[key_str] == 100.5 + assert f'{key_str}:units' in hdf5_attrs + assert hdf5_attrs[f'{key_str}:units'] == 'um' + assert f'{key_str}:source' in hdf5_attrs + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. 
+# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/_test/unit/test_stores.py_ b/imswitch/imcontrol/_test/unit/test_stores.py_ deleted file mode 100644 index 155605fc5..000000000 --- a/imswitch/imcontrol/_test/unit/test_stores.py_ +++ /dev/null @@ -1,46 +0,0 @@ -from dataclasses import dataclass -import os -import pytest -from imswitch.imcontrol.model.managers.RecordingManager import ZarrStorer, HDF5Storer, TiffStorer -from imswitch.imcontrol.model.managers.DetectorsManager import DetectorsManager -import numpy as np -import zarr - - -@dataclass -class MockDetectorsManager(): - shape: tuple - pixelSizeUm: float - - -@pytest.fixture() -def fake_manager(): - return MockDetectorsManager(shape=(100,100), pixelSizeUm=12) - -def test_storer_instatiation(fake_manager): - ZarrStorer("test",fake_manager) - TiffStorer("test",fake_manager) - HDF5Storer("test",fake_manager) - -def test_zarr_storer(tmpdir, fake_manager): - """Test that the zarr storer can be instantiated and that the zarr store is created""" - path = os.path.join(tmpdir, "test") - storer = ZarrStorer(path, {"test_channel": fake_manager}) - storer.snap({"test_channel": np.zeros((100,100))}, {"test_channel": "test"}) - assert os.path.exists(path+".zarr"), "path does not exist" - - -def test_tiff_storer(tmpdir, fake_manager): - """Test that the tiff storer can be instantiated and that the files are created""" - path = os.path.join(tmpdir, "test") - storer = TiffStorer(path, {"test_channel": fake_manager}) - storer.snap({"test_channel": np.zeros((100,100))}, {"test_channel": "test"}) - assert os.path.exists(path + "_test_channel.tiff"), "path does not exist" - - -def test_hdf5_storer(tmpdir, fake_manager): - """Test that the HDF5 storer can be instantiated and that the files are created""" - path = os.path.join(tmpdir, "test") - storer = HDF5Storer(path, {"test_channel": fake_manager}) - storer.snap({"test_channel": np.zeros((100,100))}, {"test_channel": {"test": 3}}) - assert os.path.exists(path + "_test_channel.h5"), "path does not exist" \ No newline at end of file diff --git a/imswitch/imcontrol/controller/ImConMainController.py b/imswitch/imcontrol/controller/ImConMainController.py index 1102664c2..5a8f372d4 100644 --- a/imswitch/imcontrol/controller/ImConMainController.py +++ b/imswitch/imcontrol/controller/ImConMainController.py @@ -175,6 +175,22 @@ def __init__(self, options, setupInfo, mainView, moduleCommChannel): f"Could not create StorageController: {e}" ) + # Add MetadataController for metadata hub API access (no widget required) + try: + self.__logger.info("Creating MetadataController for metadata API") + from .controllers.MetadataController import MetadataController + self.controllers["Metadata"] = self.__factory.createController( + MetadataController, None + ) + # Register MetadataController + self.__masterController.registerController( + "Metadata", self.controllers["Metadata"] + ) + except Exception as e: + self.__logger.warning( + f"Could not 
create MetadataController: {e}" + ) + # Generate API self.__api = None apiObjs = list(self.controllers.values()) + [self.__commChannel] diff --git a/imswitch/imcontrol/controller/MasterController.py b/imswitch/imcontrol/controller/MasterController.py index 98f555ed3..8d4be4f4a 100644 --- a/imswitch/imcontrol/controller/MasterController.py +++ b/imswitch/imcontrol/controller/MasterController.py @@ -14,7 +14,6 @@ SIMManager, DPCManager, LEDMatrixsManager, - MCTManager, ROIScanManager, WebRTCManager, HyphaManager, @@ -26,12 +25,10 @@ LightsheetManager, NidaqManager, FOVLockManager, - StandManager, RotatorsManager, LEDsManager, ScanManagerBase, ScanManagerPointScan, - ScanManagerMoNaLISA, FlatfieldManager, FlowStopManager, WorkflowManager, @@ -40,6 +37,7 @@ ExperimentManager, ObjectiveManager, ArkitektManager, + InstrumentMetadataManager, ) @@ -58,6 +56,25 @@ def __init__(self, setupInfo, commChannel, moduleCommChannel): # Dictionary to hold controller references for inter-controller communication self._controllersRegistry = {} + # Initialize Metadata Hub + try: + from imswitch.imcontrol.model.metadata import ( + MetadataHub, SharedAttrsMetadataBridge + ) + self.metadataHub = MetadataHub() + self.__logger.info("Metadata Hub initialized") + + # Initialize bridge to connect SharedAttrs to MetadataHub + self.metadataBridge = SharedAttrsMetadataBridge( + shared_attrs=commChannel.sharedAttrs, + hub=self.metadataHub + ) + self.__logger.info("SharedAttrs-MetadataHub bridge initialized") + except ImportError as e: + self.__logger.warning(f"Metadata Hub not available: {e}") + self.metadataHub = None + self.metadataBridge = None + # Init managers self.rs232sManager = RS232sManager(self.__setupInfo.rs232devices) @@ -87,12 +104,18 @@ def __init__(self, setupInfo, commChannel, moduleCommChannel): self.UC2ConfigManager = UC2ConfigManager( self.__setupInfo.uc2Config, lowLevelManagers ) + + # Initialize InstrumentMetadataManager for OME instrument metadata + self.instrumentMetadataManager = InstrumentMetadataManager( + instrumentInfo=getattr(self.__setupInfo, 'instrument', None), + setupInfo=self.__setupInfo, + lowLevelManagers=lowLevelManagers, + ) + if "SIM" in self.__setupInfo.availableWidgets: self.simManager = SIMManager(self.__setupInfo.sim) if "DPC" in self.__setupInfo.availableWidgets: self.dpcManager = DPCManager(self.__setupInfo.dpc) - if "MCT" in self.__setupInfo.availableWidgets: - self.mctManager = MCTManager(self.__setupInfo.mct) if "NIDAQ" in self.__setupInfo.availableWidgets: self.nidaqManager = NidaqManager(self.__setupInfo.nidaq) if "Hypha" in self.__setupInfo.availableWidgets: @@ -136,6 +159,9 @@ def __init__(self, setupInfo, commChannel, moduleCommChannel): # If there is a imswitch_sim_manager, we want to add this as self.imswitch_sim_widget to the # MasterController Class + ################################################################################################### + # PLUGIN SYSTEM FOR MANAGERS + ################################################################################################### for entry_point in pkg_resources.iter_entry_points("imswitch.implugins"): InfoClass = None print(f"entry_point: {entry_point.name}") @@ -160,25 +186,89 @@ def __init__(self, setupInfo, commChannel, moduleCommChannel): except Exception as e: self.__logger.error(e) - if self.__setupInfo.microscopeStand: - self.standManager = StandManager( - self.__setupInfo.microscopeStand, **lowLevelManagers - ) + + 
################################################################################################### + # PLUGIN SYSTEM FOR MANAGERS + ################################################################################################### # Generate scanManager type according to setupInfo if self.__setupInfo.scan: if self.__setupInfo.scan.scanWidgetType == "PointScan": self.scanManager = ScanManagerPointScan(self.__setupInfo) elif self.__setupInfo.scan.scanWidgetType == "Base": self.scanManager = ScanManagerBase(self.__setupInfo) - elif self.__setupInfo.scan.scanWidgetType == "MoNaLISA": - self.scanManager = ScanManagerMoNaLISA(self.__setupInfo) else: self.__logger.error( 'ScanWidgetType in SetupInfo["scan"] not recognized, choose one of the following:' ' ["Base", "PointScan", "MoNaLISA"].' ) return + + # Register detectors with MetadataHub + if self.metadataHub is not None: + self._register_detectors_with_hub() + + def _register_detectors_with_hub(self): + """Register all detectors with the MetadataHub.""" + try: + from imswitch.imcontrol.model.metadata import DetectorContext + + for detectorName in self.detectorsManager.getAllDeviceNames(): + detector = self.detectorsManager[detectorName] + + # Get detector properties + shape_px = detector.shape + # Safely access pixelSizeUm with fallback + if hasattr(detector, 'pixelSizeUm') and detector.pixelSizeUm: + if isinstance(detector.pixelSizeUm, (list, tuple)) and len(detector.pixelSizeUm) > 1: + pixel_size_um = detector.pixelSizeUm[1] + elif isinstance(detector.pixelSizeUm, (list, tuple)) and len(detector.pixelSizeUm) == 1: + pixel_size_um = detector.pixelSizeUm[0] + elif isinstance(detector.pixelSizeUm, (int, float)): + pixel_size_um = float(detector.pixelSizeUm) + else: + pixel_size_um = 1.0 + else: + pixel_size_um = 1.0 + + dtype = str(detector.fullChunk.dtype) if hasattr(detector, 'fullChunk') else 'uint16' + + # Create detector context + context = DetectorContext( + name=detectorName, + shape_px=shape_px, + pixel_size_um=pixel_size_um, + dtype=dtype, + binning=detector.binning, + channel_name=detectorName, + is_rgb=getattr(detector, '_isRGB', False), # Add isRGB flag + ) + + # Try to get additional properties if available + try: + if hasattr(detector, 'parameters') and 'exposure' in detector.parameters: + context.exposure_ms = detector.parameters['exposure'].value + except Exception: + pass + + try: + if hasattr(detector, 'parameters') and 'gain' in detector.parameters: + context.gain = detector.parameters['gain'].value + except Exception: + pass + + # Try to get bit depth + try: + if hasattr(detector, 'bitDepth'): + context.bit_depth = detector.bitDepth + except Exception: + pass + + # Register with hub + self.metadataHub.register_detector(detectorName, context) + self.__logger.info(f"Registered detector '{detectorName}' with MetadataHub") + except Exception as e: + self.__logger.error(f"Error registering detectors with MetadataHub: {e}") # Connect signals cc = self.__commChannel diff --git a/imswitch/imcontrol/controller/controllers/ExperimentController.py b/imswitch/imcontrol/controller/controllers/ExperimentController.py index 5e069b2b1..731cd6899 100644 --- a/imswitch/imcontrol/controller/controllers/ExperimentController.py +++ b/imswitch/imcontrol/controller/controllers/ExperimentController.py @@ -1118,6 +1118,34 @@ def save_frame_ome(self, context: WorkflowContext, metadata: Dict[str, Any], **k "z_index": kwargs.get("z_index", 0), "channel_index": kwargs.get("channel_index", 0), } + + # Enrich metadata with MetadataHub data if available 
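+        # Best-effort enrichment: any hub lookup error below is caught and only
+        # logged at debug level so that frame saving itself is never interrupted.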
+ try: + if hasattr(self._master, 'metadataHub') and self._master.metadataHub is not None: + detector_name = self._master.detectorsManager.getAllDeviceNames()[0] + + # Get objective info from hub + hub_global = self._master.metadataHub.get_latest(flat=True, filter_category='Objective') + for key, value_dict in hub_global.items(): + if 'PixelSizeUm' in key: + ome_metadata['objective_pixel_size_um'] = value_dict.get('value') + elif 'Name' in key: + ome_metadata['objective_name'] = value_dict.get('value') + elif 'Magnification' in key: + ome_metadata['objective_magnification'] = value_dict.get('value') + elif 'NA' in key: + ome_metadata['objective_na'] = value_dict.get('value') + + # Get detector context (includes isRGB, exposure, etc.) + detector_ctx = self._master.metadataHub.get_detector(detector_name) + if detector_ctx: + ome_metadata['detector_is_rgb'] = detector_ctx.is_rgb + if detector_ctx.exposure_ms: + ome_metadata['exposure_ms'] = detector_ctx.exposure_ms + if detector_ctx.pixel_size_um: + ome_metadata['pixel_size_um'] = detector_ctx.pixel_size_um + except Exception as e: + self._logger.debug(f"Could not enrich OME metadata from MetadataHub: {e}") try: # Get file_writers list from context diff --git a/imswitch/imcontrol/controller/controllers/InLineHoloController.py_ b/imswitch/imcontrol/controller/controllers/InLineHoloController.py_ deleted file mode 100644 index bc7fc2b02..000000000 --- a/imswitch/imcontrol/controller/controllers/InLineHoloController.py_ +++ /dev/null @@ -1,1331 +0,0 @@ -import numpy as np -from datetime import datetime -from dataclasses import dataclass -from typing import Optional, Dict, Any, Tuple, List -import time -import traceback -import threading -import queue -import multiprocessing - -try: - import cv2 - hasCV2 = True -except: - hasCV2 = False - -try: - from scipy import fft as scipy_fft - hasSciPyFFT = True -except: - hasSciPyFFT = False - -from imswitch.imcommon.model import dirtools, initLogger, APIExport -from imswitch.imcommon.framework import Signal, Thread, Worker, Mutex -from imswitch.imcontrol.view import guitools -from ..basecontrollers import LiveUpdatedController -from imswitch import IS_HEADLESS - - -# ========================= -# Dataclasses (API-stable) -# ========================= -@dataclass -class InLineHoloParams: - """Inline hologram processing parameters""" - pixelsize: float = 3.45e-6 # meters - wavelength: float = 488e-9 # meters - na: float = 0.3 - dz: float = 0.0 # propagation distance in meters - roi_center: Optional[List[int]] = None # [x, y] in pixels - roi_size: Optional[int] = 256 # square ROI size - color_channel: str = "green" # "red", "green", "blue" - flip_x: bool = False - flip_y: bool = False - rotation: int = 0 # 0, 90, 180, 270 - update_freq: float = 10.0 # Hz (processing framerate) - binning: int = 1 # binning factor (1, 2, 4, etc.) 
- use_scipy_fft: bool = True # Use scipy.fft for multi-core FFT if available - fft_workers: int = 4 # Number of workers for scipy FFT (Pi 5 has 4 cores) - use_multiprocessing: bool = True # Use separate process for processing (bypass GIL) - use_float32: bool = True # Use float32 instead of float64 for speed - enable_benchmarking: bool = False # Enable performance logging - - def to_dict(self) -> Dict[str, Any]: - return { - "pixelsize": self.pixelsize, - "wavelength": self.wavelength, - "na": self.na, - "dz": self.dz, - "roi_center": self.roi_center, - "roi_size": self.roi_size, - "color_channel": self.color_channel, - "flip_x": self.flip_x, - "flip_y": self.flip_y, - "rotation": self.rotation, - "update_freq": self.update_freq, - "binning": self.binning, - "use_scipy_fft": self.use_scipy_fft, - "fft_workers": self.fft_workers, - "use_multiprocessing": self.use_multiprocessing, - "use_float32": self.use_float32, - "enable_benchmarking": self.enable_benchmarking, - } - - -@dataclass -class InLineHoloState: - """Inline hologram processing state""" - is_processing: bool = False - is_paused: bool = False - is_streaming: bool = False - last_process_time: float = 0.0 - frame_count: int = 0 - processed_count: int = 0 - dropped_frames: int = 0 - capture_fps: float = 0.0 - processing_fps: float = 0.0 - avg_process_time: float = 0.0 - - def to_dict(self) -> Dict[str, Any]: - return { - "is_processing": self.is_processing, - "is_paused": self.is_paused, - "is_streaming": self.is_streaming, - "last_process_time": self.last_process_time, - "frame_count": self.frame_count, - "processed_count": self.processed_count, - "dropped_frames": self.dropped_frames, - "capture_fps": self.capture_fps, - "processing_fps": self.processing_fps, - "avg_process_time": self.avg_process_time, - } - - -class InLineHoloController(LiveUpdatedController): - """ - Inline hologram processing controller with backend processing and API control. 
- - Features: - - Fresnel propagation for inline holograms - - Frame queue with configurable processing rate - - Pause/resume mechanism - - Binning support with automatic pixel size adjustment - - API control via RESTful endpoints - """ - - sigHoloImageComputed = Signal(np.ndarray, str) # (image, name) - sigHoloStateChanged = Signal(object) # state_dict - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._logger = initLogger(self) - - # Get camera from setup or use first available detector - if hasattr(self._setupInfo, 'holo') and self._setupInfo.holo is not None: - self.camera = getattr(self._setupInfo.holo, 'camera', None) - else: - self.camera = None - - # If no camera specified, use first available detector - if self.camera is None: - try: - all_detectors = self._master.detectorsManager.getAllDeviceNames() - if all_detectors: - self.camera = all_detectors[0] - self._logger.info(f"Using first available detector: {self.camera}") - else: - self._logger.error("No detectors available") - return - except Exception as e: - self._logger.error(f"Failed to get detector list: {e}") - return - - # Initialize parameters from setup or defaults - if hasattr(self._setupInfo, 'holo') and self._setupInfo.holo is not None: - self._params = InLineHoloParams( - pixelsize=getattr(self._setupInfo.holo, "pixelsize", 3.45e-6), - wavelength=getattr(self._setupInfo.holo, "wavelength", 488e-9), - na=getattr(self._setupInfo.holo, "na", 0.3), - roi_center=getattr(self._setupInfo.holo, "roi_center", None), - roi_size=getattr(self._setupInfo.holo, "roi_size", 256), - update_freq=getattr(self._setupInfo.holo, "update_freq", 10.0), - binning=getattr(self._setupInfo.holo, "binning", 1), - ) - else: - self._params = InLineHoloParams() - - self._state = InLineHoloState() - self._processing_lock = threading.Lock() - - # Store last frame for pause mode - self._last_frame = None - - # MJPEG streaming - self._mjpeg_queue = queue.Queue(maxsize=2) - self._jpeg_quality = 85 - - # Producer-consumer pipeline (small queue, drops old frames) - self._raw_frame_queue = queue.Queue(maxsize=2) - - # Processing thread/process - self._processing_thread = None - self._capture_thread = None - self._stop_processing_event = threading.Event() - self._stop_capture_event = threading.Event() - - # Multiprocessing infrastructure (optional) - self._processing_process = None - self._mp_input_queue = None - self._mp_output_queue = None - self._mp_stop_event = None - - # Kernel cache for Fresnel propagator (avoid rebuilding every frame) - self._kernel_cache = {} - self._kernel_cache_lock = threading.Lock() - - # Performance monitoring - self._perf_capture_times = [] - self._perf_process_times = [] - self._perf_window_size = 30 # rolling window for FPS calculation - - # Legacy GUI setup - if not IS_HEADLESS: - self._setup_legacy_gui() - - self._logger.info("InLineHoloController initialized successfully") - - def __del__(self): - """Cleanup on deletion""" - self.stop_processing_inlineholo() - - # Stop multiprocessing worker if active - if self._processing_process is not None: - if self._mp_stop_event is not None: - self._mp_stop_event.set() - self._processing_process.join(timeout=2.0) - if self._processing_process.is_alive(): - self._processing_process.terminate() - - # Stop capture thread - if self._capture_thread is not None: - self._stop_capture_event.set() - self._capture_thread.join(timeout=2.0) - - # Stop processing thread - if self._processing_thread is not None: - self._stop_processing_event.set() - 
self._processing_thread.join(timeout=2.0) - - if hasattr(super(), '__del__'): - super().__del__() - - # ========================= - # Hologram Processing Core - # ========================= - @staticmethod - def _abssqr(x): - """Calculate intensity (what a detector sees)""" - return np.real(x * np.conj(x)) - - def _FT(self, x): - """Forward Fourier transform with proper frequency shift""" - if self._params.use_scipy_fft and hasSciPyFFT: - return scipy_fft.fftshift( - scipy_fft.fft2(x, workers=self._params.fft_workers) - ) - return np.fft.fftshift(np.fft.fft2(x)) - - def _iFT(self, x): - """Inverse Fourier transform with proper frequency shift""" - if self._params.use_scipy_fft and hasSciPyFFT: - return scipy_fft.ifft2( - scipy_fft.ifftshift(x), workers=self._params.fft_workers - ) - return np.fft.ifft2(np.fft.ifftshift(x)) - - def _get_kernel_cache_key(self, shape): - """Generate cache key for kernel based on parameters""" - nx, ny = shape[1], shape[0] - ps = self._params.pixelsize * self._params.binning - return (nx, ny, ps, self._params.wavelength, self._params.dz) - - def _get_fresnel_kernel(self, shape): - """ - Get or compute cached Fresnel kernel factors. - - Args: - shape: Shape of input field (ny, nx) - - Returns: - Tuple of (hfx, hfy) - 1D Fresnel factors for broadcasting - """ - cache_key = self._get_kernel_cache_key(shape) - - with self._kernel_cache_lock: - if cache_key in self._kernel_cache: - return self._kernel_cache[cache_key] - - # Cache miss - compute kernel - nx, ny = shape[1], shape[0] - ps = self._params.pixelsize * self._params.binning - lambda0 = self._params.wavelength - dz = self._params.dz - - grid_size_x = ps * nx - grid_size_y = ps * ny - - # Use float32 for speed if enabled - dtype = np.float32 if self._params.use_float32 else np.float64 - - # 1-D frequency grids - fx = np.linspace(-(nx-1)/2*(1/grid_size_x), (nx-1)/2*(1/grid_size_x), nx, dtype=dtype) - fy = np.linspace(-(ny-1)/2*(1/grid_size_y), (ny-1)/2*(1/grid_size_y), ny, dtype=dtype) - - # 1-D Fresnel factors - phase = 1j * np.pi * lambda0 * dz - hfx = np.exp(phase * fx**2) - hfy = np.exp(phase * fy**2) - - # Cache the result - with self._kernel_cache_lock: - self._kernel_cache[cache_key] = (hfx, hfy) - - return hfx, hfy - - def _invalidate_kernel_cache(self): - """Clear kernel cache when parameters change""" - with self._kernel_cache_lock: - self._kernel_cache.clear() - - def _fresnel_propagator(self, E0, dz): - """ - Freespace propagation using Fresnel kernel (with caching) - - Args: - E0: Initial complex field in x-y source plane - dz: Distance from sensor to object in meters - - Returns: - Ef: Propagated output field - """ - # Get cached kernel factors - hfx, hfy = self._get_fresnel_kernel(E0.shape) - - E0fft = self._FT(E0) - - # Broadcasted multiply without forming a 2-D exp - G = E0fft * hfx # broadcasts along columns - G *= hfy[:, None] # broadcasts along rows - - Ef = self._iFT(G) - - return Ef - - def _apply_binning(self, image): - """Apply binning to image if binning > 1""" - if self._params.binning <= 1: - return image - - b = self._params.binning - h, w = image.shape[:2] - - # Crop to multiple of binning - new_h = (h // b) * b - new_w = (w // b) * b - image = image[:new_h, :new_w] - - # Reshape and average - if len(image.shape) == 2: - # Grayscale - return image.reshape(new_h // b, b, new_w // b, b).mean(axis=(1, 3)) - else: - # Color - return image.reshape(new_h // b, b, new_w // b, b, -1).mean(axis=(1, 3)) - - def _extract_roi(self, image): - """Extract ROI from image based on current 
parameters""" - h, w = image.shape[:2] - - # Determine ROI center - if self._params.roi_center is not None and self._params.roi_center[0] is not None and self._params.roi_center[1] is not None: - cx, cy = self._params.roi_center - else: - cx, cy = w // 2, h // 2 - - # Calculate ROI bounds - roi_size = np.min([self._params.roi_size, np.max([h, w])]) - half_size = roi_size // 2 - - x1 = max(0, cx - half_size) - y1 = max(0, cy - half_size) - x2 = min(w, cx + half_size) - y2 = min(h, cy + half_size) - - return image[y1:y2, x1:x2] - - def _extract_color_channel(self, image): - """Extract specified color channel from RGB image""" - if len(image.shape) == 2: - return image # Already grayscale - channel_map = {"red": 0, "green": 1, "blue": 2} - channel_idx = channel_map.get(self._params.color_channel, 3) - if channel_idx not in [0, 1, 2]: - return np.mean(image, axis=2) # Return original if invalid channel - return image[:, :, channel_idx] - - def _apply_transforms(self, image): - """Apply flip and rotation transformations""" - if self._params.flip_x: - image = np.fliplr(image) - if self._params.flip_y: - image = np.flipud(image) - - # Apply rotation (counter-clockwise) - if self._params.rotation == 90: - image = np.rot90(image, k=1) - elif self._params.rotation == 180: - image = np.rot90(image, k=2) - elif self._params.rotation == 270: - image = np.rot90(image, k=3) - - return image - - def _process_inline(self, image): - """Process inline hologram""" - # Apply binning first - binned = self._apply_binning(image) - - # Extract ROI and color channel - roi = self._extract_roi(binned) - gray = self._extract_color_channel(roi) - gray = self._apply_transforms(gray) - - # Convert to complex field (E-field from intensity) - # Use float32 for speed if enabled - dtype = np.float32 if self._params.use_float32 else np.float64 - E0 = np.sqrt(gray.astype(dtype)) - - # Propagate - Ef = self._fresnel_propagator(E0, self._params.dz) - - # Return intensity - return self._abssqr(Ef) - - def _process_frame(self, image): - """Process a single frame""" - try: - result = self._process_inline(image) - if result is not None: - self.sigHoloImageComputed.emit(result, "inline_holo") - self._state.processed_count += 1 - - # Add to MJPEG stream if active - if self._state.is_streaming: - self._add_to_mjpeg_stream(result) - return result - except Exception as e: - self._logger.error(f"Error processing hologram: {e}") - self._logger.debug(traceback.format_exc()) - return None - - def _get_latest_frame(self): - """ - Fetch latest frame from camera detector. - Returns None if camera not available or no frame ready. - """ - try: - detector = self._master.detectorsManager[self.camera] - return detector.getLatestFrame() - except Exception as e: - self._logger.debug(f"Failed to get frame: {e}") - return None - - def _add_to_mjpeg_stream(self, image): - """ - Encode and add reconstructed hologram to MJPEG stream. 
- - Args: - image: Processed hologram result (float array) - """ - if not hasCV2: - return - - try: - # Normalize to uint8 - frame = np.array(image) - # self._logger.debug(f"Adding frame to MJPEG stream, shape={frame.shape}, dtype={frame.dtype}") - if frame.dtype != np.uint8: - vmin = float(np.min(frame)) - vmax = float(np.max(frame)) - if vmax > vmin: - frame = ((frame - vmin) / (vmax - vmin) * 255.0).astype(np.uint8) - else: - frame = np.zeros_like(frame, dtype=np.uint8) - - # Encode as JPEG - encode_params = [cv2.IMWRITE_JPEG_QUALITY, self._jpeg_quality] - success, encoded = cv2.imencode('.jpg', frame, encode_params) - - if success: - jpeg_bytes = encoded.tobytes() - # Build MJPEG frame with proper headers - header = ( - b'--frame\r\n' - b'Content-Type: image/jpeg\r\n' - ) - content_length = f'Content-Length: {len(jpeg_bytes)}\r\n\r\n'.encode('ascii') - mjpeg_frame = header + content_length + jpeg_bytes + b'\r\n' - - # Put in queue, drop frame if full - try: - self._mjpeg_queue.put_nowait(mjpeg_frame) - except queue.Full: - pass # Drop frame if queue is full - except Exception as e: - self._logger.debug(f"Error encoding MJPEG frame: {e}") - - def _capture_loop(self): - """ - Producer thread: continuously captures frames from camera and pushes to queue. - Drops oldest frames if queue is full to maintain low latency. - """ - self._logger.info("Capture loop started") - capture_count = 0 - last_fps_time = time.time() - - while not self._stop_capture_event.is_set(): - try: - if self._state.is_paused: - # Don't capture new frames when paused - time.sleep(0.01) - continue - - frame = self._get_latest_frame() - if frame is not None: - # Avoid unnecessary copy if detector buffer is stable - # For most cameras we need to copy, but this could be optimized per-detector - frame_copy = frame # No copy for now - benchmark first - - # Try to put in queue, drop oldest if full - try: - self._raw_frame_queue.put_nowait(frame_copy) - capture_count += 1 - - # Update capture FPS - current_time = time.time() - elapsed = current_time - last_fps_time - if elapsed >= 1.0: - self._state.capture_fps = capture_count / elapsed - capture_count = 0 - last_fps_time = current_time - except queue.Full: - # Queue full - drop oldest frame and add new one - try: - self._raw_frame_queue.get_nowait() - self._raw_frame_queue.put_nowait(frame_copy) - self._state.dropped_frames += 1 - except: - pass - else: - time.sleep(0.001) # Short sleep if no frame available - - except Exception as e: - self._logger.error(f"Error in capture loop: {e}") - self._logger.debug(traceback.format_exc()) - time.sleep(0.01) - - self._logger.info("Capture loop stopped") - - def _processing_loop(self): - """ - Consumer thread: processes frames from queue at specified update_freq. - In pause mode, reprocesses last frame without pulling from queue. 
- """ - self._logger.info("Processing loop started") - process_count = 0 - last_fps_time = time.time() - process_time_sum = 0.0 - - while not self._stop_processing_event.is_set(): - try: - # Calculate minimum interval between processing - min_interval = 1.0 / self._params.update_freq if self._params.update_freq > 0 else 0.0 - - current_time = time.time() - - # Check if enough time has passed - if current_time - self._state.last_process_time < min_interval: - time.sleep(min_interval * 0.1) # Short sleep to prevent CPU spinning - continue - - # Check if paused - if self._state.is_paused: - # In pause mode, process last frame continuously at update rate - if self._last_frame is not None: - process_start = time.time() - with self._processing_lock: - self._process_frame(self._last_frame) - self._state.last_process_time = current_time - process_time = time.time() - process_start - process_time_sum += process_time - process_count += 1 - else: - # Normal processing mode - get frame from queue - try: - frame = self._raw_frame_queue.get(timeout=0.1) - self._state.frame_count += 1 - self._last_frame = frame # Store for pause mode (already copied in capture) - - process_start = time.time() - with self._processing_lock: - self._process_frame(frame) - self._state.last_process_time = current_time - - process_time = time.time() - process_start - process_time_sum += process_time - process_count += 1 - - except queue.Empty: - continue - - # Update processing FPS and avg time - elapsed = current_time - last_fps_time - if elapsed >= 1.0: - self._state.processing_fps = process_count / elapsed - if process_count > 0: - self._state.avg_process_time = process_time_sum / process_count - process_count = 0 - process_time_sum = 0.0 - last_fps_time = current_time - - # Log benchmarks if enabled - if self._params.enable_benchmarking: - self._logger.info( - f"Performance: capture={self._state.capture_fps:.1f} fps, " - f"process={self._state.processing_fps:.1f} fps, " - f"avg_time={self._state.avg_process_time*1000:.1f} ms, " - f"dropped={self._state.dropped_frames}" - ) - - except Exception as e: - self._logger.error(f"Error in processing loop: {e}") - self._logger.debug(traceback.format_exc()) - time.sleep(0.1) - - self._logger.info("Processing loop stopped") - - @staticmethod - def _multiprocessing_worker(input_queue, output_queue, stop_event, initial_params_dict, logger_name): - """ - Separate process worker for hologram processing (bypasses GIL). - - This runs in a separate process to avoid Python GIL limitations. - Receives (frame, params_dict) tuples via input_queue, processes them, - sends results to output_queue. 
- - Args: - input_queue: multiprocessing.Queue for receiving (frame, params_dict) tuples - output_queue: multiprocessing.Queue for sending results - stop_event: multiprocessing.Event for shutdown signal - initial_params_dict: Initial parameters (for FFT worker config) - logger_name: Name for logger in this process - """ - import logging - logger = logging.getLogger(logger_name) - logger.info("Multiprocessing worker started") - - # Get initial params for FFT configuration - initial_params = InLineHoloParams(**initial_params_dict) - - # Choose FFT implementation (fixed at startup based on initial config) - if initial_params.use_scipy_fft and hasSciPyFFT: - def FT(x, workers): - return scipy_fft.fftshift(scipy_fft.fft2(x, workers=workers)) - def iFT(x, workers): - return scipy_fft.ifft2(scipy_fft.ifftshift(x), workers=workers) - use_scipy = True - else: - def FT(x, workers): - return np.fft.fftshift(np.fft.fft2(x)) - def iFT(x, workers): - return np.fft.ifft2(np.fft.ifftshift(x)) - use_scipy = False - - logger.debug(f"Worker initialized with scipy_fft={use_scipy}") - - # Kernel cache for this process - keys include all relevant parameters - kernel_cache = {} - - def get_fresnel_kernel(shape, params): - """Local kernel cache function with full parameter key""" - nx, ny = shape[1], shape[0] - ps = params.pixelsize * params.binning - # Cache key includes all kernel-affecting parameters - cache_key = (nx, ny, ps, params.wavelength, params.dz, params.use_float32) - - if cache_key in kernel_cache: - return kernel_cache[cache_key] - - grid_size_x = ps * nx - grid_size_y = ps * ny - - dtype = np.float32 if params.use_float32 else np.float64 - fx = np.linspace(-(nx-1)/2*(1/grid_size_x), (nx-1)/2*(1/grid_size_x), nx, dtype=dtype) - fy = np.linspace(-(ny-1)/2*(1/grid_size_y), (ny-1)/2*(1/grid_size_y), ny, dtype=dtype) - - phase = 1j * np.pi * params.wavelength * params.dz - hfx = np.exp(phase * fx**2) - hfy = np.exp(phase * fy**2) - - kernel_cache[cache_key] = (hfx, hfy) - - # Limit cache size to prevent memory issues - if len(kernel_cache) > 10: - # Remove oldest entries - keys_to_remove = list(kernel_cache.keys())[:-5] - for key in keys_to_remove: - del kernel_cache[key] - - return hfx, hfy - - def process_hologram(gray_roi, params): - """Process hologram in worker process with current parameters""" - dtype = np.float32 if params.use_float32 else np.float64 - E0 = np.sqrt(gray_roi.astype(dtype)) - - # Get kernel (uses current params for cache key) - hfx, hfy = get_fresnel_kernel(E0.shape, params) - - # Propagate - E0fft = FT(E0, params.fft_workers) - G = E0fft * hfx - G *= hfy[:, None] - Ef = iFT(G, params.fft_workers) - - # Return intensity - return np.real(Ef * np.conj(Ef)) - - # Processing loop - while not stop_event.is_set(): - try: - # Get (frame, params_dict) tuple from queue with timeout - data = input_queue.get(timeout=0.1) - if data is None: - continue - - # Unpack frame and current parameters - frame_data, params_dict = data - - # Reconstruct params from dict (gets current values from main process) - params = InLineHoloParams(**params_dict) - - # Process with current parameters - result = process_hologram(frame_data, params) - - # Send result - try: - output_queue.put_nowait(result) - except: - pass # Drop if output queue full - - except queue.Empty: - continue - except Exception as e: - logger.error(f"Error in multiprocessing worker: {e}") - logger.debug(traceback.format_exc()) - - logger.info("Multiprocessing worker stopped") - - def _processing_loop_with_mp(self): - """ - Processing loop 
variant that uses multiprocessing worker. - Pulls frames from queue, preprocesses, sends (frame, params) to worker process, receives results. - Parameters are sent with each frame to ensure API changes take effect immediately. - """ - self._logger.info("Processing loop (multiprocessing mode) started") - process_count = 0 - last_fps_time = time.time() - process_time_sum = 0.0 - - while not self._stop_processing_event.is_set(): - try: - min_interval = max(1.0 / self._params.update_freq if self._params.update_freq > 0 else 0.2, 0.2) - current_time = time.time() - - if current_time - self._state.last_process_time < min_interval: - time.sleep(min_interval * 0.1) - continue - - # Get current parameters snapshot for worker - current_params_dict = self._params.to_dict() - - if self._state.is_paused: - # Pause mode - send last preprocessed frame with current params - if self._last_frame is not None: - # Preprocess - gray_roi = self._preprocess_frame_for_worker(self._last_frame) - - # Send (frame, params) tuple to worker - try: - self._mp_input_queue.put_nowait((gray_roi, current_params_dict)) - except: - pass - - # Get result from worker - try: - result = self._mp_output_queue.get(timeout=min_interval) - self.sigHoloImageComputed.emit(result, "inline_holo") - self._state.processed_count += 1 - if self._state.is_streaming: - self._add_to_mjpeg_stream(result) - self._state.last_process_time = current_time - except queue.Empty: - pass - else: - # Normal mode - try: - frame = self._raw_frame_queue.get(timeout=0.1) - self._state.frame_count += 1 - self._last_frame = frame - - process_start = time.time() - - # Preprocess (ROI extraction, etc.) - gray_roi = self._preprocess_frame_for_worker(frame) - - # Send (frame, params) tuple to worker process - try: - self._mp_input_queue.put_nowait((gray_roi, current_params_dict)) - except: - pass # Drop if queue full - - # Get result from worker - try: - result = self._mp_output_queue.get(timeout=min_interval) - self.sigHoloImageComputed.emit(result, "inline_holo") - self._state.processed_count += 1 - if self._state.is_streaming: - self._add_to_mjpeg_stream(result) - - process_time = time.time() - process_start - process_time_sum += process_time - process_count += 1 - self._state.last_process_time = current_time - except queue.Empty: - pass - - except queue.Empty: - continue - - # Update metrics - elapsed = current_time - last_fps_time - if elapsed >= 1.0: - self._state.processing_fps = process_count / elapsed - if process_count > 0: - self._state.avg_process_time = process_time_sum / process_count - process_count = 0 - process_time_sum = 0.0 - last_fps_time = current_time - - if self._params.enable_benchmarking: - self._logger.info( - f"Performance (MP): capture={self._state.capture_fps:.1f} fps, " - f"process={self._state.processing_fps:.1f} fps, " - f"avg_time={self._state.avg_process_time*1000:.1f} ms, " - f"dropped={self._state.dropped_frames}" - ) - - except Exception as e: - self._logger.error(f"Error in multiprocessing loop: {e}") - self._logger.debug(traceback.format_exc()) - time.sleep(0.1) - - self._logger.info("Processing loop (multiprocessing mode) stopped") - - def _preprocess_frame_for_worker(self, image): - """ - Preprocess frame before sending to worker process. - Does everything except FFT-based propagation. 
- """ - binned = self._apply_binning(image) - roi = self._extract_roi(binned) - gray = self._extract_color_channel(roi) - gray = self._apply_transforms(gray) - return gray - - # ========================= - # API: Parameter Control - # ========================= - @APIExport(runOnUIThread=True) - def get_parameters_inlineholo(self) -> Dict[str, Any]: - """ - Get current hologram processing parameters - - Returns: - Dictionary with all parameters - - Example: - { - "pixelsize": 3.45e-6, - "wavelength": 488e-9, - "na": 0.3, - "dz": 0.005, - "roi_center": [512, 512], - "roi_size": 256, - "color_channel": "green", - "flip_x": false, - "flip_y": false, - "rotation": 0, - "update_freq": 10.0, - "binning": 1 - } - """ - return self._params.to_dict() - - @APIExport(runOnUIThread=True, requestType="POST") - def set_parameters_inlineholo(self, params: Dict[str, Any]) -> Dict[str, Any]: - """ - Update hologram processing parameters - - Args: - params: Dictionary with parameter updates - - pixelsize: float (meters) - - wavelength: float (meters) - - na: float - - dz: float (meters) - - roi_center: [x, y] - - roi_size: int - - color_channel: str ("red", "green", "blue") - - flip_x: bool - - flip_y: bool - - rotation: int (0, 90, 180, 270) - - update_freq: float (Hz) - - binning: int (1, 2, 4, etc.) - - use_scipy_fft: bool - - fft_workers: int - - use_float32: bool - - enable_benchmarking: bool - - Returns: - Updated parameters dictionary - - Example request: - {"dz": 0.005, "wavelength": 488e-9, "binning": 2, "use_scipy_fft": true} - """ - # Check if any kernel-affecting parameters changed - kernel_params = {'pixelsize', 'wavelength', 'dz', 'binning'} - needs_cache_invalidation = any(key in kernel_params for key in params.keys()) - - with self._processing_lock: - for key, value in params.items(): - if hasattr(self._params, key): - setattr(self._params, key, value) - - # Invalidate kernel cache if needed - if needs_cache_invalidation: - self._invalidate_kernel_cache() - self._logger.info("Kernel cache invalidated due to parameter change") - - self._emit_state_changed() - return self._params.to_dict() - - @APIExport(runOnUIThread=True) - def set_pixelsize_inlineholo(self, pixelsize: float) -> Dict[str, Any]: - """Set pixel size in meters""" - return self.set_parameters_inlineholo({"pixelsize": pixelsize}) - - @APIExport(runOnUIThread=True) - def set_wavelength_inlineholo(self, wavelength: float) -> Dict[str, Any]: - """Set wavelength in meters""" - return self.set_parameters_inlineholo({"wavelength": wavelength}) - - @APIExport(runOnUIThread=True) - def set_dz_inlineholo(self, dz: float) -> Dict[str, Any]: - """Set propagation distance in meters""" - return self.set_parameters_inlineholo({"dz": dz}) - - @APIExport(runOnUIThread=True) - def set_roi_inlineholo(self, center_x: int=None, center_y: int=None, size: int=256) -> Dict[str, Any]: - """ - Set ROI center and size - example request: - {"center": [512, 512], "size": 256} - """ - center = [center_x, center_y] if center_x is not None and center_y is not None else None - return self.set_parameters_inlineholo({"roi_center": center, "roi_size": size}) - - @APIExport(runOnUIThread=True) - def set_binning_inlineholo(self, binning: int) -> Dict[str, Any]: - """ - Set binning factor (1, 2, 4, etc.) 
- Note: Pixel size in reconstruction kernel is automatically adjusted - """ - return self.set_parameters_inlineholo({"binning": binning}) - - # ========================= - # API: Processing Control - # ========================= - @APIExport(runOnUIThread=True) - def get_state_inlineholo(self) -> Dict[str, Any]: - """Get current processing state""" - return self._state.to_dict() - - @APIExport(runOnUIThread=True) - def start_processing_inlineholo(self) -> Dict[str, Any]: - """ - Start hologram processing - - Returns: - Current state dictionary - """ - with self._processing_lock: - self._state.is_processing = True - self._state.is_paused = False - self._state.frame_count = 0 - self._state.processed_count = 0 - self._state.dropped_frames = 0 - self._state.last_process_time = 0.0 - self._state.capture_fps = 0.0 - self._state.processing_fps = 0.0 - self._state.avg_process_time = 0.0 - - # Clear frame queue - while not self._raw_frame_queue.empty(): - try: - self._raw_frame_queue.get_nowait() - except queue.Empty: - break - - # Ensure camera is running - self._ensure_camera_running() - - # Start capture thread - if self._capture_thread is None or not self._capture_thread.is_alive(): - self._stop_capture_event.clear() - self._capture_thread = threading.Thread(target=self._capture_loop, daemon=True) - self._capture_thread.start() - - # Start processing (threading or multiprocessing mode) - if self._params.use_multiprocessing: - # Multiprocessing mode - if self._processing_process is None or not self._processing_process.is_alive(): - # Create multiprocessing queues and event - self._mp_input_queue = multiprocessing.Queue(maxsize=2) - self._mp_output_queue = multiprocessing.Queue(maxsize=2) - self._mp_stop_event = multiprocessing.Event() - - # Start worker process - self._processing_process = multiprocessing.Process( - target=self._multiprocessing_worker, - args=( - self._mp_input_queue, - self._mp_output_queue, - self._mp_stop_event, - self._params.to_dict(), - self._logger.name - ), - daemon=True - ) - self._processing_process.start() - - # Start coordinator thread - self._stop_processing_event.clear() - self._processing_thread = threading.Thread( - target=self._processing_loop_with_mp, - daemon=True - ) - self._processing_thread.start() - - self._logger.info( - f"Started inline hologram processing (MULTIPROCESSING mode, " - f"scipy_fft={self._params.use_scipy_fft and hasSciPyFFT}, " - f"workers={self._params.fft_workers}, " - f"float32={self._params.use_float32})" - ) - else: - # Threading mode - if self._processing_thread is None or not self._processing_thread.is_alive(): - self._stop_processing_event.clear() - self._processing_thread = threading.Thread(target=self._processing_loop, daemon=True) - self._processing_thread.start() - - self._logger.info( - f"Started inline hologram processing (THREADING mode, " - f"scipy_fft={self._params.use_scipy_fft and hasSciPyFFT}, " - f"workers={self._params.fft_workers}, " - f"float32={self._params.use_float32})" - ) - - self._emit_state_changed() - - return self._state.to_dict() - - @APIExport(runOnUIThread=True) - def stop_processing_inlineholo(self) -> Dict[str, Any]: - """ - Stop hologram processing - - Returns: - Current state dictionary - """ - with self._processing_lock: - self._state.is_processing = False - self._state.is_paused = False - - # Stop multiprocessing worker if active - if self._mp_stop_event is not None: - self._mp_stop_event.set() - - if self._processing_process is not None and self._processing_process.is_alive(): - 
self._processing_process.join(timeout=2.0) - if self._processing_process.is_alive(): - self._processing_process.terminate() - self._processing_process = None - - # Stop both threads - self._stop_capture_event.set() - self._stop_processing_event.set() - - # Clear queues - while not self._raw_frame_queue.empty(): - try: - self._raw_frame_queue.get_nowait() - except queue.Empty: - break - - if self._mp_input_queue is not None: - while not self._mp_input_queue.empty(): - try: - self._mp_input_queue.get_nowait() - except: - break - - if self._mp_output_queue is not None: - while not self._mp_output_queue.empty(): - try: - self._mp_output_queue.get_nowait() - except: - break - - self._logger.info("Stopped hologram processing") - self._emit_state_changed() - - return self._state.to_dict() - - @APIExport(runOnUIThread=True) - def pause_processing_inlineholo(self) -> Dict[str, Any]: - """ - Pause processing - will continuously process last frame at update rate - - Returns: - Current state dictionary - """ - with self._processing_lock: - if self._state.is_processing: - self._state.is_paused = True - - self._logger.info("Paused hologram processing (processing last frame)") - self._emit_state_changed() - - return self._state.to_dict() - - @APIExport(runOnUIThread=True) - def resume_processing_inlineholo(self) -> Dict[str, Any]: - """ - Resume processing - will process incoming frames continuously - - Returns: - Current state dictionary - """ - with self._processing_lock: - if self._state.is_processing: - self._state.is_paused = False - - self._logger.info("Resumed hologram processing") - self._emit_state_changed() - - return self._state.to_dict() - - @APIExport(runOnUIThread=False) - def mjpeg_stream_inlineholo(self, startStream: bool = True, jpeg_quality: int = 85): - """ - HTTP endpoint for MJPEG streaming of reconstructed holograms. 
- - Args: - startStream: Whether to start streaming (True) or stop (False) - jpeg_quality: JPEG compression quality (0-100, default 85) - - Returns: - StreamingResponse with MJPEG data or status message - - Example: - GET /holocontroller/mjpeg_stream_inline?startStream=true&jpeg_quality=90 - """ - try: - from fastapi.responses import StreamingResponse - except ImportError: - return {"status": "error", "message": "FastAPI not available"} - - if not hasCV2: - return {"status": "error", "message": "opencv-python required for MJPEG streaming"} - - if not startStream: - # Stop streaming - with self._processing_lock: - self._state.is_streaming = False - # Clear queue - while not self._mjpeg_queue.empty(): - try: - self._mjpeg_queue.get_nowait() - except queue.Empty: - break - self._logger.info("Stopped MJPEG stream") - self._emit_state_changed() - return {"status": "success", "message": "stream stopped"} - - # Update JPEG quality - self._jpeg_quality = max(0, min(100, jpeg_quality)) - - # Start streaming - with self._processing_lock: - self._state.is_streaming = True - - # Ensure processing is running - if not self._state.is_processing: - self.start_processing_inlineholo() - - self._logger.info(f"Started MJPEG stream (quality={self._jpeg_quality})") - self._emit_state_changed() - - # Create generator for streaming response - def frame_generator(): - """Generator that yields MJPEG frames.""" - try: - while self._state.is_streaming: - try: - frame = self._mjpeg_queue.get(timeout=1.0) - if frame: - yield frame - except queue.Empty: - continue - except GeneratorExit: - self._logger.info("MJPEG stream connection closed by client") - with self._processing_lock: - self._state.is_streaming = False - self._emit_state_changed() - except Exception as e: - self._logger.error(f"Error in MJPEG frame generator: {e}") - - # Return streaming response with proper headers - headers = { - "Cache-Control": "no-cache, no-store, must-revalidate", - "Pragma": "no-cache", - "Expires": "0", - "X-Accel-Buffering": "no", - "Connection": "keep-alive", - } - - return StreamingResponse( - frame_generator(), - media_type="multipart/x-mixed-replace;boundary=frame", - headers=headers - ) - - #@APIExport(runOnUIThread=True) - def process_single_frame(self, image: np.ndarray = None) -> Dict[str, Any]: - """ - Process a single frame manually - - Args: - image: Optional numpy array. 
If None, captures from camera - - Returns: - Result info dictionary - """ - if image is None: - # Capture from camera - image = self._capture_camera_frame() - - if image is None: - return {"success": False, "error": "No image available"} - - result = self._process_frame(image) - - return { - "success": result is not None, - "frame_shape": result.shape if result is not None else None - } - - def _ensure_camera_running(self): - """Ensure camera is running, start if necessary""" - try: - detector = self._master.detectorsManager[self.camera] - if not detector._running: - self._logger.info(f"Starting camera {self.camera}") - detector.startAcquisition() - except Exception as e: - self._logger.error(f"Failed to start camera: {e}") - - def _capture_camera_frame(self): - """Capture a single frame from camera""" - try: - detector = self._master.detectorsManager[self.camera] - return detector.getLatestFrame() - except Exception as e: - self._logger.error(f"Failed to capture frame: {e}") - return None - - def _emit_state_changed(self): - """Emit state changed signal""" - self.sigHoloStateChanged.emit(self._state.to_dict()) - - # ========================= - # Legacy GUI compatibility (if not headless) - # ========================= - def setShowInLineHolo(self, enabled): - """Legacy: Show or hide inline hologram processing""" - if enabled: - self.start_processing() - else: - self.stop_processing() - - def changeRate(self, updateRate): - """Legacy: Change update rate""" - if updateRate == "" or updateRate <= 0: - updateRate = 1 - self.set_parameters_inlineholo({"update_freq": updateRate}) - - def inLineValueChanged(self, magnitude): - """Legacy: Change inline propagation distance""" - dz = magnitude * 1e-3 # Convert to meters - self.set_dz(dz) - - def displayImage(self, im, name): - """Legacy: Display image in napari widget""" - if IS_HEADLESS: - return - - if im.dtype == complex or np.iscomplexobj(im): - self._widget.setImage(np.abs(im), name + "_abs") - self._widget.setImage(np.angle(im), name + "_angle") - else: - self._widget.setImage(np.abs(im), name) - - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
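For reference, the core numerical trick in the removed controller's `_fresnel_propagator` / `_get_fresnel_kernel` pair — a separable quadratic-phase kernel applied as two broadcasted 1-D multiplies instead of a full 2-D transfer function — can be reproduced standalone. A minimal NumPy sketch (array size and optical parameters are illustrative only):

```python
import numpy as np

def fresnel_propagate(E0, pixelsize, wavelength, dz):
    """Separable Fresnel propagation: the transfer function
    exp(1j*pi*wavelength*dz*(fx**2 + fy**2)) factorises into 1-D terms,
    so two broadcasted multiplies replace building a full 2-D kernel."""
    ny, nx = E0.shape
    fx = np.fft.fftshift(np.fft.fftfreq(nx, d=pixelsize))
    fy = np.fft.fftshift(np.fft.fftfreq(ny, d=pixelsize))
    hfx = np.exp(1j * np.pi * wavelength * dz * fx ** 2)
    hfy = np.exp(1j * np.pi * wavelength * dz * fy ** 2)
    G = np.fft.fftshift(np.fft.fft2(E0)) * hfx   # broadcasts along columns
    G *= hfy[:, None]                            # broadcasts along rows
    return np.fft.ifft2(np.fft.ifftshift(G))

# toy inline hologram: intensity -> amplitude -> refocused intensity
hologram = np.random.rand(256, 256).astype(np.float32)
E0 = np.sqrt(hologram)
Ef = fresnel_propagate(E0, pixelsize=3.45e-6, wavelength=488e-9, dz=5e-3)
reconstruction = np.real(Ef * np.conj(Ef))
```

Caching the 1-D factors per (shape, pixel size, wavelength, dz), as the deleted `_get_fresnel_kernel` did, keeps the per-frame cost to two FFTs and two elementwise multiplies.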
diff --git a/imswitch/imcontrol/controller/controllers/JoystickController.py b/imswitch/imcontrol/controller/controllers/JoystickController.py deleted file mode 100644 index 60c1cadcd..000000000 --- a/imswitch/imcontrol/controller/controllers/JoystickController.py +++ /dev/null @@ -1,67 +0,0 @@ - -from ..basecontrollers import LiveUpdatedController -from imswitch import IS_HEADLESS - -class JoystickController(LiveUpdatedController): - """ Linked to JoystickWidget.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # scaler - self.scaler = 100 - - # initialize the positioner - self.positioner_name = self._master.positionersManager.getAllDeviceNames()[0] - self.positioner = self._master.positionersManager[self.positioner_name] - - if IS_HEADLESS: - return - - self._widget.sigJoystickXY.connect(self.moveXY) - self._widget.sigJoystickZA.connect(self.moveZA) - - def moveXY(self, x, y): - if abs(x)>0 or abs(y) >0: - self.positioner.moveForever(speed=(0, x*self.scaler, y*self.scaler, 0), is_stop=False) - else: - for i in range(3): - self.stop("X") - self.stop("Y") - return x, y - - def moveZA(self, a, z): - if abs(a)>0 or abs(z) >0: - self.positioner.moveForever(speed=(a*self.scaler, 0, 0, z*self.scaler), is_stop=False) - else: - for i in range(3): - # currently it takes a few trials to stop the stage - self.stop("A") - self.stop("Z") - return a, z - - def stop(self, axis="X"): - self.positioner.forceStop(axis) - - - - - - - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
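The removed `moveForever` calls imply a four-element speed tuple; judging from `moveXY`/`moveZA` it is ordered (A, X, Y, Z), but that ordering is an inference from this diff rather than a documented contract. A hedged sketch of the jog/stop pattern against a hypothetical positioner object:

```python
# "positioner" stands for any object exposing moveForever(speed=..., is_stop=...)
# and forceStop(axis), as used in the deleted JoystickController above.
SCALER = 100

def jog_xy(positioner, x, y, scaler=SCALER):
    """Map normalized joystick deflection to continuous XY motion."""
    if abs(x) > 0 or abs(y) > 0:
        # assumed speed-tuple order (A, X, Y, Z), inferred from the diff
        positioner.moveForever(speed=(0, x * scaler, y * scaler, 0), is_stop=False)
    else:
        # the original controller repeated the stop call because a single
        # forceStop did not always halt the stage
        for _ in range(3):
            positioner.forceStop("X")
            positioner.forceStop("Y")
```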
diff --git a/imswitch/imcontrol/controller/controllers/LEDController.py b/imswitch/imcontrol/controller/controllers/LEDController.py index 0c2e48f5d..309c2f4a5 100644 --- a/imswitch/imcontrol/controller/controllers/LEDController.py +++ b/imswitch/imcontrol/controller/controllers/LEDController.py @@ -27,6 +27,11 @@ def __init__(self, *args, **kwargs): self.setSharedAttr(lName, _enabledAttr, self._widget.isLEDActive(lName)) self.setSharedAttr(lName, _valueAttr, self._widget.getValue(lName)) + # Store value units for metadata context + self.setSharedAttr(lName, _unitsAttr, lManager.valueUnits) + # Store range info for metadata + self.setSharedAttr(lName, _rangeMinAttr, lManager.valueRangeMin) + self.setSharedAttr(lName, _rangeMaxAttr, lManager.valueRangeMax) # Load presets for ledPresetName in self._setupInfo.ledPresets: @@ -248,6 +253,9 @@ def changeScanPower(self, ledName, ledValue): _attrCategory = 'LED' _enabledAttr = 'Enabled' _valueAttr = 'Value' +_unitsAttr = 'Units' +_rangeMinAttr = 'RangeMin' +_rangeMaxAttr = 'RangeMax' # Copyright (C) 2020-2024 ImSwitch developers diff --git a/imswitch/imcontrol/controller/controllers/LaserController.py b/imswitch/imcontrol/controller/controllers/LaserController.py index 9c8faef27..523d5c40d 100644 --- a/imswitch/imcontrol/controller/controllers/LaserController.py +++ b/imswitch/imcontrol/controller/controllers/LaserController.py @@ -35,6 +35,10 @@ def __init__(self, *args, **kwargs): lManager.valueRangeStep if lManager.valueRangeStep is not None else None, (lManager.freqRangeMin, lManager.freqRangeMax, lManager.freqRangeInit) if lManager.isModulated else (0, 0, 0)) + # Set wavelength metadata (static property from config) + if lManager.wavelength: + self.setSharedAttr(lName, _wavelengthAttr, lManager.wavelength) + if not lManager.isBinary: self.valueChanged(lName, lManager.valueRangeMin) @@ -443,6 +447,8 @@ def __init__(self, valueUnits, valueDecimals, valueRange, tickInterval, singleSt _freqEnAttr = "ModulationEnabled" _freqAttr = "Frequency" _dcAttr = "DutyCycle" +_wavelengthAttr = "WavelengthNm" +_powerMwAttr = "PowerMw" # Copyright (C) 2020-2024 ImSwitch developers diff --git a/imswitch/imcontrol/controller/controllers/MCTController.py b/imswitch/imcontrol/controller/controllers/MCTController.py deleted file mode 100644 index a8cc1e6f7..000000000 --- a/imswitch/imcontrol/controller/controllers/MCTController.py +++ /dev/null @@ -1,826 +0,0 @@ - -import os -import threading -from datetime import datetime -import time -import numpy as np -import skimage.transform as transform -from pydantic import BaseModel -from typing import Optional - -from imswitch.imcommon.framework import Signal -from imswitch.imcommon.model import dirtools, initLogger, APIExport -from ..basecontrollers import ImConWidgetController -from imswitch import IS_HEADLESS - -import h5py - - - -class MCTStatus(BaseModel): - isMCTrunning: bool = False - nImagesTaken: int = 0 - timePeriod: int = 60 - zStackEnabled: bool = False - zStackMin: Optional[int] = 0 - zStackMax: Optional[int] = 0 - zStackStep: Optional[int] = 0 - xyScanEnabled: bool = False - xScanMin: Optional[int] = 0 - xScanMax: Optional[int] = 0 - xScanStep: Optional[int] = 0 - yScanMin: Optional[int] = 0 - yScanMax: Optional[int] = 0 - yScanStep: Optional[int] = 0 - Illu1Value: Optional[int] = 0 - Illu2Value: Optional[int] = 0 - Illu3Value: Optional[int] = 0 - MCTFilename: str = " " - MCTFilePath: str = " " - -class MCTController(ImConWidgetController): - """Linked to MCTWidget.""" - - sigImageReceived = Signal() - - def 
__init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._logger = initLogger(self) - - # mct parameters - self.nImagesTaken = 0 - self.timePeriod = 60 # seconds - self.zStackEnabled = False - self.zStackMin = 0 - self.zStackMax = 0 - self.zStackStep = 0 - - # xy - self.xyScanEnabled = False - self.xScanMin = 0 - self.xScanMax = 0 - self.xScanStep = 0 - self.yScanMin = 0 - self.yScanMax = 0 - self.yScanStep = 0 - - self.Illu1Value = 0 - self.Illu2Value = 0 - self.Illu3Value = 0 - self.MCTFilename = "" - self.activeIlluminations = [] - self.availableIlluminations = [] - self.MCTFilePath = " " - - # time to let hardware settle - try: - self.tWait = self._master.mctManager.tWait - except: - self.tWait = 0.1 - - # connect XY positionercanning live update https://github.com/napari/napari/issues/1110 - self.sigImageReceived.connect(self.displayImage) - - # autofocus related - self.isAutofocusRunning = False - self._commChannel.sigAutoFocusRunning.connect(self.setAutoFocusIsRunning) - - # select detectors - allDetectorNames = self._master.detectorsManager.getAllDeviceNames() - self.detector = self._master.detectorsManager[allDetectorNames[0]] - try:self.isRGB = self.detector._camera.isRGB - except: self.isRGB = False - self.detectorPixelSize = self.detector.pixelSizeUm - - # select lasers - allIlluNames = self._master.lasersManager.getAllDeviceNames()+ self._master.LEDMatrixsManager.getAllDeviceNames() - for iDevice in allIlluNames: - try: - # laser maanger - self.availableIlluminations.append(self._master.lasersManager[iDevice]) - except: - # lexmatrix manager - self.availableIlluminations.append(self._master.LEDMatrixsManager[iDevice]) - - # select stage - try: - self.positioner = self._master.positionersManager[self._master.positionersManager.getAllDeviceNames()[0]] - except: - self.positioner = None - - self.isMCTrunning = False - - # Connect MCTWidget signals - if not IS_HEADLESS: - self._widget.mctStartButton.clicked.connect(self.startMCT) - self._widget.mctStopButton.clicked.connect(self.stopMCT) - self._widget.mctShowLastButton.clicked.connect(self.showLast) - - self._widget.sigSliderIllu1ValueChanged.connect(self.valueIllu1Changed) - self._widget.sigSliderIllu2ValueChanged.connect(self.valueIllu2Changed) - self._widget.sigSliderIllu3ValueChanged.connect(self.valueIllu3Changed) - self._widget.mctShowLastButton.setEnabled(False) - - # setup gui limits for sliders - if len(self.availableIlluminations) > 0: - self._widget.sliderIllu1.setMaximum(self.availableIlluminations[0].valueRangeMax) - self._widget.sliderIllu1.setMinimum(self.availableIlluminations[0].valueRangeMin) - if len(self.availableIlluminations) > 1: - self._widget.sliderIllu2.setMaximum(self.availableIlluminations[1].valueRangeMax) - self._widget.sliderIllu2.setMinimum(self.availableIlluminations[1].valueRangeMin) - if len(self.availableIlluminations) > 2: - self._widget.sliderIllu3.setMaximum(self.availableIlluminations[2].valueRangeMax) - self._widget.sliderIllu3.setMinimum(self.availableIlluminations[2].valueRangeMin) - - - def startMCT(self): - # initilaze setup - # this is not a thread! 
- # this is called from the QT GUI - # start the timelapse - if not self.isMCTrunning and len(self.activeIlluminations)>0: - self.switchOffIllumination() - # GUI updates - if not IS_HEADLESS: - self._widget.mctStartButton.setEnabled(False) - self._widget.setMessageGUI("Starting timelapse...") - - - # get parameters from GUI - self.zStackMin, self.zStackMax, self.zStackStep, self.zStackEnabled = self._widget.getZStackValues() - self.xScanMin, self.xScanMax, self.xScanStep, self.yScanMin, self.yScanMax, self.yScanStep, self.xyScanEnabled = self._widget.getXYScanValues() - self.timePeriod, self.nImagesToCapture = self._widget.getTimelapseValues() - self.MCTFilename = self._widget.getFilename() - self.MCTDate = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p") - - # reserve space for the stack - self._widget.mctShowLastButton.setEnabled(False) - - # start the timelapse - otherwise we have to wait for the first run after timePeriod to take place.. - self.startTimelapseImaging(self.timePeriod, self.nImagesToCapture, - self.MCTFilename, self.MCTDate, - self.zStackEnabled, self.zStackMin, self.zStackMax, self.zStackStep, - self.xyScanEnabled, self.xScanMin, self.xScanMax, self.xScanStep, self.yScanMin, self.yScanMax, self.yScanStep) - - else: - self.isMCTrunning = False - self._widget.mctStartButton.setEnabled(True) - - - def stopMCT(self): - self.isMCTrunning = False - - # go back to initial position - try: - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPosition[0], self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - except: - pass - - # delete any existing timer - try: - del self.timer - except: - pass - - # delete any existing thread - try: - del self.MCTThread - except: - pass - - if not IS_HEADLESS: - self._widget.setMessageGUI("Stopping timelapse...") - self._widget.mctStartButton.setEnabled(True) - self._widget.setMessageGUI("Done wit timelapse...") - - - def showLast(self, isCleanStack=False): - # isCleanStack=False => subtract backgroudn or not - if hasattr(self, "LastStackIllu1ArrayLast"): - try: - #subtract background and normalize stack - if isCleanStack: LastStackIllu1ArrayLast = self.cleanStack(self.LastStackIllu1ArrayLast) - else: LastStackIllu1ArrayLast = self.LastStackIllu1ArrayLast - self._widget.setImage(LastStackIllu1ArrayLast, colormap="green", name="GFP",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - if hasattr(self, "LastStackIllu2ArrayLast"): - try: - if isCleanStack: LastStackIllu2ArrayLast = self.cleanStack(self.LastStackIllu2ArrayLast) - else: LastStackIllu2ArrayLast = self.LastStackIllu2ArrayLast - self._widget.setImage(LastStackIllu2ArrayLast, colormap="red", name="SiR",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - if hasattr(self, "LastStackLEDArrayLast"): - try: - if isCleanStack: LastStackLEDArrayLast = self.cleanStack(self.LastStackLEDArrayLast) - else: LastStackLEDArrayLast = self.LastStackLEDArrayLast - self._widget.setImage(LastStackLEDArrayLast, colormap="gray", name="Brightfield",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - def cleanStack(self, input): - import NanoImagingPack as nip - mBackground = nip.gaussf(np.mean(input,0),10) - moutput = input/mBackground - mFluctuations = np.mean(moutput, (1,2)) - moutput /= np.expand_dims(np.expand_dims(mFluctuations,-1),-1) - return np.uint8(moutput) - - def displayStack(self, im): - """ Displays the image in the view. 
""" - self._widget.setImage(im) - - @APIExport(runOnUIThread=False) - def getLastMCTStack(self): - if hasattr(self, "LastStackIllu1ArrayLast"): - return self.LastStackIllu1ArrayLast - else: - return None - - @APIExport(runOnUIThread=False) - def getMCTStatus(self) -> dict: - return MCTStatus(**{"isMCTrunning":self.isMCTrunning, - "nImagesTaken":self.nImagesTaken, - "timePeriod":self.timePeriod, - "zStackEnabled":self.zStackEnabled, - "zStackMin":self.zStackMin, - "zStackMax":self.zStackMax, - "zStackStep":self.zStackStep, - "xyScanEnabled":self.xyScanEnabled, - "xScanMin":self.xScanMin, - "xScanMax":self.xScanMax, - "xScanStep":self.xScanStep, - "yScanMin":self.yScanMin, - "yScanMax":self.yScanMax, - "yScanStep":self.yScanStep, - "Illu1Value":self.Illu1Value, - "Illu2Value":self.Illu2Value, - "Illu3Value":self.Illu3Value, - "MCTFilename":self.MCTFilename, - "MCTFilepath":self.MCTFilePath}) - - @APIExport(runOnUIThread=True) - def startTimelapseImaging(self, tperiod:int=5, nImagesToCapture:int=10, - MCTFilename:str="Test", MCTDate:str="", - zStackEnabled:bool=False, zStackMin:int=0, zStackMax:int=0, zStackStep:int=0, - xyScanEnabled:bool=False, xScanMin:int=0, xScanMax:int=0, xScanStep:int=0, - yScanMin:int=0, yScanMax:int=0, yScanStep:int=0, - IlluValue1:int =-1, IlluValue2:int =-1, IlluValue3:int =-1): - # this is called periodically by the timer - if not self.isMCTrunning: - try: - # make sure there is no exisiting thrad - del self.MCTThread - except: - pass - - # get default date to not overwrite the same files - if MCTDate == "": - MCTDate = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p") - uniqueID = np.random.randint(0, 1000) - MCTDate = MCTDate + "_"+str(uniqueID) # make sure we do not overwrite files - - # retreive from REST API - if IlluValue1>=0: self.Illu1Value = IlluValue1 - if IlluValue2>=0: self.Illu2Value = IlluValue2 - if IlluValue3>=0: self.Illu3Value = IlluValue3 - - # get active illuminations - self.activeIlluminations = [] - if self.Illu1Value>0: self.activeIlluminations.append(self.availableIlluminations[0]) - if self.Illu2Value>0 and len(self.availableIlluminations)>1: self.activeIlluminations.append(self.availableIlluminations[1]) - if self.Illu3Value>0 and len(self.availableIlluminations)>2: self.activeIlluminations.append(self.availableIlluminations[2]) - - - # this should decouple the hardware-related actions from the GUI - self.isMCTrunning = True - self.MCTThread = threading.Thread(target=self.startTimelapseImagingThread, args=(tperiod, nImagesToCapture, - MCTFilename, MCTDate, - zStackEnabled, zStackMin, zStackMax, zStackStep, - xyScanEnabled, xScanMin, xScanMax, xScanStep, - yScanMin, yScanMax, yScanStep), daemon=True) - - self.MCTThread.start() - - @APIExport(runOnUIThread=True) - def stopTimelapseImaging(self): - self.stopMCT() - - def doAutofocus(self, params, timeout=10): - self._logger.info("Autofocusing...") - self._widget.setMessageGUI("Autofocusing...") - self._commChannel.sigAutoFocus.emit(int(params["valueRange"]), int(params["valueSteps"])) - self.isAutofocusRunning = True - - try: - while self.isAutofocusRunning: - time.sleep(0.1) - t0 = time.time() - if not self.isAutofocusRunning or time.time()-t0>timeout: - self._logger.info("Autofocusing done.") - return - except Exception as e: - self._logger.error(e) - - - def startTimelapseImagingThread(self, tperiod, nImagesToCapture, - MCTFilename, MCTDate, - zStackEnabled, zStackMin, zStackMax, zStackStep, - xyScanEnabled, xScanMin, xScanMax, xScanStep, - yScanMin, yScanMax, yScanStep): - # this wil run 
in the background - self.nImagesTaken=0 - self.timeLast = 0 - if zStackEnabled: - nZStack = int(np.ceil((zStackMax-zStackMin)/zStackStep)) - else: - nZStack = 1 - # get current position - if self.positioner is not None: - currentPositions = self.positioner.getPosition() - self.initialPosition = (currentPositions["X"], currentPositions["Y"]) - self.initialPositionZ = currentPositions["Z"] - else: - self.initialPosition = (0,0) - self.initialPositionZ = 0 - - # HDF5 file setup: prepare data storage - fileExtension = "h5" - self.MCTFilePath = self.getSaveFilePath(date=MCTDate, - filename=MCTFilename, - extension=fileExtension) - self._logger.info(f"Saving to {self.MCTFilePath}") - self.detectorWidth, self.detectorHeight = self.detector._camera.SensorWidth, self.detector._camera.SensorHeight - if self.isRGB: - init_dims = (1, len(self.activeIlluminations), nZStack, self.detectorWidth, self.detectorHeight, 3) # time, channels, z, y, x, RGB - max_dims = (None, 3, nZStack, None, None, 3) # Allow unlimited time points and z slices - else: - init_dims = (1, len(self.activeIlluminations), nZStack, self.detectorWidth, self.detectorHeight) # time, channels, z, y, x - max_dims = (None, 3, nZStack, None, None) # Allow unlimited time points and z slices - - self.h5File = HDF5File(filename=self.MCTFilePath, init_dims=init_dims, max_dims=max_dims, isRGB=self.isRGB) - - # run as long as the MCT is active - while(self.isMCTrunning): - # stop measurement once done - if self.nImagesTaken >= nImagesToCapture: - self.isMCTrunning = False - self._logger.debug("Done with timelapse") - if not IS_HEADLESS: self._widget.mctStartButton.setEnabled(True) - break - - # initialize a run - if time.time() - self.timeLast >= (tperiod): - - # run an event - self.timeLast = time.time() # makes sure that the period is measured from launch to launch - - # reserve and free space for displayed stacks - self.LastStackIllu1 = [] - self.LastStackIllu2 = [] - self.LastStackLED = [] - - try: - ''' - AUTOFOCUS - ''' - self.performAutofocus() - - ''' - ACQUIRE CHANNELS, Z-STACKS, XY-SCANS - ''' - self.acquireCZXYScan() - - ''' - UPDATE GUI - ''' - self.updateGUI() - - #increase iterator - self.nImagesTaken += 1 - - except Exception as e: - self._logger.error("Thread closes with Error: "+str(e)) - self.isMCTrunning = False - self._logger.debug("Done with timelapse") - if not IS_HEADLESS: self._widget.mctStartButton.setEnabled(True) - return - - # pause to not overwhelm the CPU - time.sleep(0.1) - - - def updateGUI(self): - - # sneak images into arrays for displaying stack - if self.zStackEnabled and not self.xyScanEnabled: - self.LastStackIllu1ArrayLast = np.array(self.LastStackIllu1) - self.LastStackIllu2ArrayLast = np.array(self.LastStackIllu2) - self.LastStackLEDArrayLast = np.array(self.LastStackLED) - - if not IS_HEADLESS: - # update the text in the GUI - self._widget.setMessageGUI(self.nImagesTaken) - self._widget.mctShowLastButton.setEnabled(True) - - def performAutofocus(self): - if not IS_HEADLESS: autofocusParams = self._widget.getAutofocusValues() - else: return - if self.positioner is not None and self._widget.isAutofocus() and np.mod(self.nImagesTaken, int(autofocusParams['valuePeriod'])) == 0: - self._widget.setMessageGUI("Autofocusing...") - # turn on illuimination - self.activeIlluminations[0].setValue(autofocusParams["valueRange"]) - self.activeIlluminations[0].setEnabled(True) - time.sleep(self.tWait) - self.doAutofocus(autofocusParams) - self.switchOffIllumination() - - def acquireCZXYScan(self): - # precompute steps 
for xy scan - # snake scan - if self.xyScanEnabled: - xyScanStepsAbsolute = [] - xyScanIndices = [] - # we snake over y - fwdpath = np.arange(self.yScanMin, self.yScanMax, self.yScanStep) - bwdpath = np.flip(fwdpath) - # we increase linearly over x - for indexX, ix in enumerate(np.arange(self.xScanMin, self.xScanMax, self.xScanStep)): - if indexX%2==0: - for indexY, iy in enumerate(fwdpath): - xyScanStepsAbsolute.append([ix, iy]) - else: - for indexY, iy in enumerate(bwdpath): - xyScanStepsAbsolute.append([ix, iy]) - - # reserve space for tiled image - downScaleFactor = 4 - nTilesX = int(np.ceil((self.xScanMax-self.xScanMin)/self.xScanStep)) - nTilesY = int(np.ceil((self.yScanMax-self.yScanMin)/self.yScanStep)) - imageDimensions = self.detector.getLatestFrame().shape # self.detector._camera.CameraWidth TODO not good! - imageDimensionsDownscaled = (imageDimensions[1]//downScaleFactor, imageDimensions[0]//downScaleFactor) # Y/X - tiledImageDimensions = (nTilesX*imageDimensions[1]//downScaleFactor, nTilesY*imageDimensions[0]//downScaleFactor) - self.tiledImage = np.zeros(tiledImageDimensions) - - else: - xyScanStepsAbsolute = [[0,0]] - self.xScanMin = 0 - self.xScanMax = 0 - self.yScanMin = 0 - self.yScanMax = 0 - - - # precompute steps for z scan - if self.zStackEnabled: - zStepsAbsolute = np.arange(self.zStackMin, self.zStackMax, self.zStackStep) + self.initialPositionZ - else: - zStepsAbsolute = [self.initialPositionZ] - - - # in case something is not connected we want to reconnect! - # TODO: This should go into some function outside the MCT!!! - #if not ("IDENTIFIER_NAME" in self._master.UC2ConfigManager.ESP32.state.get_state() and self._master.UC2ConfigManager.ESP32.state.get_state()["IDENTIFIER_NAME"] == "uc2-esp"): - # mThread = threading.Thread(target=self._master.UC2ConfigManager.initSerial) - # mThread.start() - # mThread.join() - - # initialize xyz coordinates - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.xScanMin+self.initialPosition[0],self.yScanMin+self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - - # initialize iterator - - # iterate over all xy coordinates iteratively - - ''' - XY Scan - ''' - for ipos, iXYPos in enumerate(xyScanStepsAbsolute): - if not self.isMCTrunning: - break - # move to xy position is necessary - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(iXYPos[0]+self.initialPosition[0],iXYPos[1]+self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - - ''' - Z-stack - ''' - allZStackFrames = [] - for iZ in zStepsAbsolute: - # move to each position - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=iZ, axis="Z", is_absolute=True, is_blocking=True) - time.sleep(self.tWait) # unshake - - ''' - Illumination - ''' - # capture image for every illumination - allChannelFrames = [] - allPositions = [] - for illuIndex, mIllumination in enumerate(self.activeIlluminations): - if mIllumination.name==self.availableIlluminations[0].name: - illuValue = self.Illu1Value - elif mIllumination.name==self.availableIlluminations[1].name: - illuValue = self.Illu2Value - elif mIllumination.name==self.availableIlluminations[2].name: - illuValue = self.Illu3Value - - # change illumination - mIllumination.setValue(illuValue) - mIllumination.setEnabled(True) - - # always mmake sure we get a frame that is not the same as the one with illumination off eventually - timeoutFrameRequest = 1 # seconds # TODO: Make dependent on exposure 
time - cTime = time.time() - frameSync=3 - lastFrameNumber=-1 - while(1): - # get frame and frame number to get one that is newer than the one with illumination off eventually - mFrame, currentFrameNumber = self.detector.getLatestFrame(returnFrameNumber=True) - if lastFrameNumber==-1: - # first round - lastFrameNumber = currentFrameNumber - if time.time()-cTime> timeoutFrameRequest: - # in case exposure time is too long we need break at one point - break - if currentFrameNumber <= lastFrameNumber+frameSync: - time.sleep(0.01) # off-load CPU - else: - break - # store frames - allChannelFrames.append(mFrame) - mIllumination.setEnabled(False) - - # store positions - mPositions = self.positioner.getPosition() - allPositions.append((mPositions["X"], mPositions["Y"], mPositions["Z"])) - ''' - elif mIllumination=="LEDMatrix": - self.illu.setAll(1, (self.Illu3Value,self.Illu3Value,self.Illu3Value)) - time.sleep(self.tWait) - lastFrame = self.detector.getLatestFrame() - self.LastStackLED.append(lastFrame.copy()) - ''' - allZStackFrames.append(allChannelFrames) - - # ensure all illus are off - self.switchOffIllumination() - - # save to HDF5 - if self.isRGB: - framesToSave = np.transpose(np.array(allZStackFrames), (1,0,3,2,4)) # time, # todo check order!! - else: - framesToSave = np.transpose(np.array(allZStackFrames), (1,0,2,3)) # time, - self.h5File.append_data(self.nImagesTaken, framesToSave, np.array(allPositions)) - self._logger.debug(f"Saved image {self.nImagesTaken} to HDF5") - del framesToSave - - - # reduce backlash => increase chance to endup at the same position - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPositionZ), axis="Z", is_absolute=True, is_blocking=True) - - if self.xyScanEnabled: - # lets try to visualize each slice in napari - # def setImage(self, im, colormap="gray", name="", pixelsize=(1,1,1)): - # construct the tiled image - iX = int(np.floor((iXYPos[0]-self.xScanMin) // self.xScanStep)) - iY = int(np.floor((iXYPos[1]-self.yScanMin) // self.yScanStep)) - # handle rgb => turn to mono for now - ''' FIXME: This is currently not working - if len(lastFrame.shape)>2: - lastFrame = np.uint16(np.mean(lastFrame, 0)) - # add tile to large canvas - lastFrameScaled = cv2.resize(lastFrame, None, fx = 1/downScaleFactor, fy = 1/downScaleFactor, interpolation = cv2.INTER_NEAREST) - try: - self.tiledImage[int(iY*imageDimensionsDownscaled[1]):int(iY*imageDimensionsDownscaled[1]+imageDimensionsDownscaled[1]), - int(iX*imageDimensionsDownscaled[0]):int(iX*imageDimensionsDownscaled[0]+imageDimensionsDownscaled[0])] = lastFrameScaled - except Exception as e: - self._logger.error(e) - self._logger.error("Failed to parse a frame into the tiledImage array") - ''' - self.sigImageReceived.emit() # => displays image - - - # initialize xy coordinates - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPosition[0], self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPositionZ), axis="Z", is_absolute=True, is_blocking=True) - - - # disable motors to prevent overheating - if self.positioner is not None: - try:self.positioner.enalbeMotors(enable=self.positioner.is_enabled) - except: pass # special case for the ESP32 board - - def switchOffIllumination(self): - # switch off all illu sources - for mIllu in self.activeIlluminations: - mIllu.setEnabled(False) - mIllu.setValue(0) - 
time.sleep(0.1) - - def changeValueIlluSlider(self, currIllu, value): - allIllus = np.arange(len(self.availableIlluminations)) - # turn on current illumination - if not self.availableIlluminations[currIllu].enabled: self.availableIlluminations[currIllu].setEnabled(1) - self.availableIlluminations[currIllu].setValue(value) - - # switch off other illus - for illuIndex in allIllus: - if illuIndex != currIllu and self.availableIlluminations[illuIndex].power>0: - self.availableIlluminations[illuIndex].setValue(0) - self.availableIlluminations[illuIndex].setEnabled(0) - - def valueIllu1Changed(self, value): - # turn on current illumination based on slider value - currIllu = 0 - self.Illu1Value = value - self._widget.mctLabelIllu1.setText('Intensity (Laser 1):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def valueIllu2Changed(self, value): - currIllu = 1 - self.Illu2Value = value - self._widget.mctLabelIllu2.setText('Intensity (Laser 2):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def valueIllu3Changed(self, value): - currIllu = 2 - self.Illu3Value = value - self._widget.mctLabelIllu3.setText('Intensity (Laser 3):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def __del__(self): - pass - - def getSaveFilePath(self, date, filename, extension): - mFilename = f"{date}_{filename}.{extension}" - dirPath = os.path.join(dirtools.UserFileDirs.getValidatedDataPath(), 'recordings', date) - newPath = os.path.join(dirPath,mFilename) - - if not os.path.exists(dirPath): - os.makedirs(dirPath) - - return newPath - - def setAutoFocusIsRunning(self, isRunning: bool): - # this is set by the AutofocusController once the AF is finished/initiated - self.isAutofocusRunning = isRunning - - def displayImage(self): - # a bit weird, but we cannot update outside the main thread - name = "tilescanning" - self._widget.setImage(np.uint16(self.tiledImage), colormap="gray", name=name, pixelsize=(1,1), translation=(0,0)) - - - # helper functions - def downscale_image(self, image, factor): - # Downscale the image - downscaled_image = transform.downscale_local_mean(image, (factor, factor)) - return downscaled_image - - def crop_center(self, image, size): - # Get the dimensions of the image - height, width = image.shape[:2] - - # Calculate the coordinates for cropping - start_x = max(0, int((width - size) / 2)) - start_y = max(0, int((height - size) / 2)) - end_x = min(width, start_x + size) - end_y = min(height, start_y + size) - - # Crop the image - cropped_image = image[start_y:end_y, start_x:end_x] - - return cropped_image - - -class HDF5File(object): - def __init__(self, filename, init_dims, max_dims=None, isRGB=False): - self.filename = filename - self.init_dims = init_dims # time, channels, z, y, x - self.max_dims = max_dims # time, channels, z, y, x - self.isRGB=isRGB - self.create_dataset() - - def create_dataset(self): - with h5py.File(self.filename, 'w') as file: - # Create a resizable dataset for the image data - dset = file.create_dataset('ImageData', shape=self.init_dims, maxshape=self.max_dims, dtype='uint16', compression="gzip") - - # Initialize a group for storing metadata - meta_group = file.create_group('Metadata') - - def append_data(self, timepoint, frame_data, xyz_coordinates): - with h5py.File(self.filename, 'a') as file: - dset = file['ImageData'] - meta_group = file['Metadata'] - - # Resize the dataset to accommodate the new timepoint - current_size = dset.shape[0] - dset.resize(current_size + 1, axis=0) - - # Add the new frame data - try: - if 
self.isRGB: - dset[current_size, :, :, :, :, :] = np.uint16(frame_data) - else: - dset[current_size, :, :, :, :] = np.uint16(frame_data) - except: - # in case X/Y are swapped - if self.isRGB: - dset[current_size, :, :, :, :, :] = np.transpose(np.uint16(frame_data), (0,1,2,4,3)) - else: - dset[current_size, :, :, :, :] = np.transpose(np.uint16(frame_data), (0,1,3,2)) - - # Add metadata for the new frame - for channel, xyz in enumerate(xyz_coordinates): - meta_group.create_dataset(f'Time_{timepoint}_Channel_{channel}', data=np.float32(xyz)) - - - -''' -Crosscolleration based drift correction - if False and not self.xyScanEnabled: - # treat images - imageStack = self.LastStackIllu2 # FIXME: Hardcoded - imageStack = self.LastStackLED # FIXME: Hardcoded - - driftCorrectionDownScaleFactor = 5 - driftCorrectionCropSize = 800 - iShift = [0,0] - imageList = [] - - # convert to list if necessary - if type(imageStack)!=list or len(imageStack)<2: - imageStack = list(imageStack) - - # image processing - for iImage in imageStack: - if len(iImage.shape)>2: - # if RGB => make mono - iImage = np.mean(iImage, -1) - image = self.crop_center(iImage, driftCorrectionCropSize) - image = self.downscale_image(image, driftCorrectionDownScaleFactor) - imageList.append(image) - - # remove background - imageList = np.array(imageList) - if len(imageList.shape)<3: - imageList = np.expand_dims(imageList,0) - imageList = imageList/ndi.filters.gaussian_filter(np.mean(imageList,0), 10) - - # Find max focus - bestFocus = 0 - bestFocusIndex = 0 - for index, image in enumerate(imageList): - # remove high frequencies - imagearraygf = ndi.filters.gaussian_filter(image, 3) - - # compute focus metric - focusValue = np.mean(ndi.filters.laplace(imagearraygf)) - if focusValue > bestFocus: - bestFocus = focusValue - bestFocusIndex = index - - # Align the images - image2 = np.std(imageList, (0)) - - #image2 = scipy.ndimage.gaussian_filter(image2, sigma=10) - if self.nImagesTaken > 0: - shift, error, diffphase = phase_cross_correlation(image1, image2) - iShift += (shift) - - # Shift image2 to align with image1 - image = imageList[bestFocusIndex] - #aligned_image = np.roll(image, int(iShift[1]), axis=1) - #aligned_image = np.roll(aligned_image,int(iShift[0]), axis=0) - self.positioner.move(value=(self.initialPosition[0]+shift[1], self.initialPosition[1]+shift[0]), axis="XY", is_absolute=True, is_blocking=True) - - image1 = image2.copy() - - #save values - #make sure not to have too large travelrange after last (e.g. initial position + 2*shift)) -''' -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
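The MCT writer removed above persists each timepoint through h5py's resizable datasets: the time axis is created with an unlimited `maxshape` and grown by one slice before every write. A minimal, self-contained sketch of that append pattern, with an illustrative file name and array shapes rather than the ones used by the controller:

```python
import h5py
import numpy as np

# Create a dataset whose first (time) axis may grow without bound.
# Shapes are illustrative: 2 channels, 5 z-slices, 64x64 px per frame.
with h5py.File("timelapse_sketch.h5", "w") as f:
    f.create_dataset(
        "ImageData",
        shape=(1, 2, 5, 64, 64),
        maxshape=(None, 2, 5, 64, 64),  # None = unlimited along time
        dtype="uint16",
        compression="gzip",
    )

# Append one timepoint: resize along axis 0, then write the new slice.
def append_timepoint(path, frames):
    with h5py.File(path, "a") as f:
        dset = f["ImageData"]
        t = dset.shape[0]
        dset.resize(t + 1, axis=0)
        dset[t] = np.asarray(frames, dtype="uint16")

append_timepoint("timelapse_sketch.h5", np.zeros((2, 5, 64, 64)))
```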
diff --git a/imswitch/imcontrol/controller/controllers/MCTController.py_OLD b/imswitch/imcontrol/controller/controllers/MCTController.py_OLD deleted file mode 100644 index 3268771e9..000000000 --- a/imswitch/imcontrol/controller/controllers/MCTController.py_OLD +++ /dev/null @@ -1,832 +0,0 @@ - -import os -import threading -from datetime import datetime -import time -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import scipy.ndimage as ndi -import scipy.signal as signal -import skimage.transform as transform -import tifffile as tif -from pydantic import BaseModel -from typing import Optional - -from imswitch.imcommon.framework import Signal, Thread, Worker, Mutex, Timer -from imswitch.imcommon.model import dirtools, initLogger, APIExport -from skimage.registration import phase_cross_correlation -from ..basecontrollers import ImConWidgetController -from imswitch import IS_HEADLESS - -import h5py -import numpy as np - - - -class MCTStatus(BaseModel): - isMCTrunning: bool = False - nImagesTaken: int = 0 - timePeriod: int = 60 - zStackEnabled: bool = False - zStackMin: Optional[int] = 0 - zStackMax: Optional[int] = 0 - zStackStep: Optional[int] = 0 - xyScanEnabled: bool = False - xScanMin: Optional[int] = 0 - xScanMax: Optional[int] = 0 - xScanStep: Optional[int] = 0 - yScanMin: Optional[int] = 0 - yScanMax: Optional[int] = 0 - yScanStep: Optional[int] = 0 - Illu1Value: Optional[int] = 0 - Illu2Value: Optional[int] = 0 - Illu3Value: Optional[int] = 0 - MCTFilename: str = " " - MCTFilePath: str = " " - -class MCTController(ImConWidgetController): - """Linked to MCTWidget.""" - - sigImageReceived = Signal() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._logger = initLogger(self) - - # mct parameters - self.nImagesTaken = 0 - self.timePeriod = 60 # seconds - self.zStackEnabled = False - self.zStackMin = 0 - self.zStackMax = 0 - self.zStackStep = 0 - - # xy - self.xyScanEnabled = False - self.xScanMin = 0 - self.xScanMax = 0 - self.xScanStep = 0 - self.yScanMin = 0 - self.yScanMax = 0 - self.yScanStep = 0 - - self.Illu1Value = 0 - self.Illu2Value = 0 - self.Illu3Value = 0 - self.MCTFilename = "" - self.activeIlluminations = [] - self.availableIlluminations = [] - self.MCTFilePath = " " - - # time to let hardware settle - try: - self.tWait = self._master.mctManager.tWait - except: - self.tWait = 0.1 - - # connect XY positionercanning live update https://github.com/napari/napari/issues/1110 - self.sigImageReceived.connect(self.displayImage) - - # autofocus related - self.isAutofocusRunning = False - self._commChannel.sigAutoFocusRunning.connect(self.setAutoFocusIsRunning) - - # select detectors - allDetectorNames = self._master.detectorsManager.getAllDeviceNames() - self.detector = self._master.detectorsManager[allDetectorNames[0]] - self.isRGB = self.detector._camera.isRGB - self.detectorPixelSize = self.detector.pixelSizeUm - - # select lasers - allIlluNames = self._master.lasersManager.getAllDeviceNames()+ self._master.LEDMatrixsManager.getAllDeviceNames() - for iDevice in allIlluNames: - try: - # laser maanger - self.availableIlluminations.append(self._master.lasersManager[iDevice]) - except: - # lexmatrix manager - self.availableIlluminations.append(self._master.LEDMatrixsManager[iDevice]) - - # select stage - try: - self.positioner = self._master.positionersManager[self._master.positionersManager.getAllDeviceNames()[0]] - except: - self.positioner = None - - self.isMCTrunning = False - - # Connect MCTWidget signals - if not IS_HEADLESS: 
- self._widget.mctStartButton.clicked.connect(self.startMCT) - self._widget.mctStopButton.clicked.connect(self.stopMCT) - self._widget.mctShowLastButton.clicked.connect(self.showLast) - - self._widget.sigSliderIllu1ValueChanged.connect(self.valueIllu1Changed) - self._widget.sigSliderIllu2ValueChanged.connect(self.valueIllu2Changed) - self._widget.sigSliderIllu3ValueChanged.connect(self.valueIllu3Changed) - self._widget.mctShowLastButton.setEnabled(False) - - # setup gui limits for sliders - if len(self.availableIlluminations) > 0: - self._widget.sliderIllu1.setMaximum(self.availableIlluminations[0].valueRangeMax) - self._widget.sliderIllu1.setMinimum(self.availableIlluminations[0].valueRangeMin) - if len(self.availableIlluminations) > 1: - self._widget.sliderIllu2.setMaximum(self.availableIlluminations[1].valueRangeMax) - self._widget.sliderIllu2.setMinimum(self.availableIlluminations[1].valueRangeMin) - if len(self.availableIlluminations) > 2: - self._widget.sliderIllu3.setMaximum(self.availableIlluminations[2].valueRangeMax) - self._widget.sliderIllu3.setMinimum(self.availableIlluminations[2].valueRangeMin) - - - def startMCT(self): - # initilaze setup - # this is not a thread! - # this is called from the QT GUI - # start the timelapse - if not self.isMCTrunning and len(self.activeIlluminations)>0: - self.switchOffIllumination() - # GUI updates - if not IS_HEADLESS: - self._widget.mctStartButton.setEnabled(False) - self._widget.setMessageGUI("Starting timelapse...") - - - # get parameters from GUI - self.zStackMin, self.zStackMax, self.zStackStep, self.zStackEnabled = self._widget.getZStackValues() - self.xScanMin, self.xScanMax, self.xScanStep, self.yScanMin, self.yScanMax, self.yScanStep, self.xyScanEnabled = self._widget.getXYScanValues() - self.timePeriod, self.nImagesToCapture = self._widget.getTimelapseValues() - self.MCTFilename = self._widget.getFilename() - self.MCTDate = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p") - - # reserve space for the stack - self._widget.mctShowLastButton.setEnabled(False) - - # start the timelapse - otherwise we have to wait for the first run after timePeriod to take place.. 
- self.startTimelapseImaging(self.timePeriod, self.nImagesToCapture, - self.MCTFilename, self.MCTDate, - self.zStackEnabled, self.zStackMin, self.zStackMax, self.zStackStep, - self.xyScanEnabled, self.xScanMin, self.xScanMax, self.xScanStep, self.yScanMin, self.yScanMax, self.yScanStep) - - else: - self.isMCTrunning = False - self._widget.mctStartButton.setEnabled(True) - - - def stopMCT(self): - self.isMCTrunning = False - - # go back to initial position - try: - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPosition[0], self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - except: - pass - - # delete any existing timer - try: - del self.timer - except: - pass - - # delete any existing thread - try: - del self.MCTThread - except: - pass - - if not IS_HEADLESS: - self._widget.setMessageGUI("Stopping timelapse...") - self._widget.mctStartButton.setEnabled(True) - self._widget.setMessageGUI("Done wit timelapse...") - - - def showLast(self, isCleanStack=False): - # isCleanStack=False => subtract backgroudn or not - if hasattr(self, "LastStackIllu1ArrayLast"): - try: - #subtract background and normalize stack - if isCleanStack: LastStackIllu1ArrayLast = self.cleanStack(self.LastStackIllu1ArrayLast) - else: LastStackIllu1ArrayLast = self.LastStackIllu1ArrayLast - self._widget.setImage(LastStackIllu1ArrayLast, colormap="green", name="GFP",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - if hasattr(self, "LastStackIllu2ArrayLast"): - try: - if isCleanStack: LastStackIllu2ArrayLast = self.cleanStack(self.LastStackIllu2ArrayLast) - else: LastStackIllu2ArrayLast = self.LastStackIllu2ArrayLast - self._widget.setImage(LastStackIllu2ArrayLast, colormap="red", name="SiR",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - if hasattr(self, "LastStackLEDArrayLast"): - try: - if isCleanStack: LastStackLEDArrayLast = self.cleanStack(self.LastStackLEDArrayLast) - else: LastStackLEDArrayLast = self.LastStackLEDArrayLast - self._widget.setImage(LastStackLEDArrayLast, colormap="gray", name="Brightfield",pixelsize=self.detectorPixelSize) - except Exception as e: - self._logger.error(e) - - def cleanStack(self, input): - import NanoImagingPack as nip - mBackground = nip.gaussf(np.mean(input,0),10) - moutput = input/mBackground - mFluctuations = np.mean(moutput, (1,2)) - moutput /= np.expand_dims(np.expand_dims(mFluctuations,-1),-1) - return np.uint8(moutput) - - def displayStack(self, im): - """ Displays the image in the view. 
""" - self._widget.setImage(im) - - @APIExport(runOnUIThread=False) - def getLastMCTStack(self): - if hasattr(self, "LastStackIllu1ArrayLast"): - return self.LastStackIllu1ArrayLast - else: - return None - - @APIExport(runOnUIThread=False) - def getMCTStatus(self) -> dict: - return MCTStatus(**{"isMCTrunning":self.isMCTrunning, - "nImagesTaken":self.nImagesTaken, - "timePeriod":self.timePeriod, - "zStackEnabled":self.zStackEnabled, - "zStackMin":self.zStackMin, - "zStackMax":self.zStackMax, - "zStackStep":self.zStackStep, - "xyScanEnabled":self.xyScanEnabled, - "xScanMin":self.xScanMin, - "xScanMax":self.xScanMax, - "xScanStep":self.xScanStep, - "yScanMin":self.yScanMin, - "yScanMax":self.yScanMax, - "yScanStep":self.yScanStep, - "Illu1Value":self.Illu1Value, - "Illu2Value":self.Illu2Value, - "Illu3Value":self.Illu3Value, - "MCTFilename":self.MCTFilename, - "MCTFilepath":self.MCTFilePath}) - - @APIExport(runOnUIThread=True) - def startTimelapseImaging(self, tperiod:int=5, nImagesToCapture:int=10, - MCTFilename:str="Test", MCTDate:str="", - zStackEnabled:bool=False, zStackMin:int=0, zStackMax:int=0, zStackStep:int=0, - xyScanEnabled:bool=False, xScanMin:int=0, xScanMax:int=0, xScanStep:int=0, - yScanMin:int=0, yScanMax:int=0, yScanStep:int=0, - IlluValue1:int =-1, IlluValue2:int =-1, IlluValue3:int =-1): - # this is called periodically by the timer - if not self.isMCTrunning: - try: - # make sure there is no exisiting thrad - del self.MCTThread - except: - pass - - # get default date to not overwrite the same files - if MCTDate == "": - MCTDate = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p") - uniqueID = np.random.randint(0, 1000) - MCTDate = MCTDate + "_"+str(uniqueID) # make sure we do not overwrite files - - # retreive from REST API - if IlluValue1>=0: self.Illu1Value = IlluValue1 - if IlluValue2>=0: self.Illu2Value = IlluValue2 - if IlluValue3>=0: self.Illu3Value = IlluValue3 - - # get active illuminations - self.activeIlluminations = [] - if self.Illu1Value>0: self.activeIlluminations.append(self.availableIlluminations[0]) - if self.Illu2Value>0 and len(self.availableIlluminations)>1: self.activeIlluminations.append(self.availableIlluminations[1]) - if self.Illu3Value>0 and len(self.availableIlluminations)>2: self.activeIlluminations.append(self.availableIlluminations[2]) - - - # this should decouple the hardware-related actions from the GUI - self.isMCTrunning = True - self.MCTThread = threading.Thread(target=self.startTimelapseImagingThread, args=(tperiod, nImagesToCapture, - MCTFilename, MCTDate, - zStackEnabled, zStackMin, zStackMax, zStackStep, - xyScanEnabled, xScanMin, xScanMax, xScanStep, - yScanMin, yScanMax, yScanStep), daemon=True) - - self.MCTThread.start() - - @APIExport(runOnUIThread=True) - def stopTimelapseImaging(self): - self.stopMCT() - - def doAutofocus(self, params, timeout=10): - self._logger.info("Autofocusing...") - self._widget.setMessageGUI("Autofocusing...") - self._commChannel.sigAutoFocus.emit(int(params["valueRange"]), int(params["valueSteps"])) - self.isAutofocusRunning = True - - try: - while self.isAutofocusRunning: - time.sleep(0.1) - t0 = time.time() - if not self.isAutofocusRunning or time.time()-t0>timeout: - self._logger.info("Autofocusing done.") - return - except Exception as e: - self._logger.error(e) - - - def startTimelapseImagingThread(self, tperiod, nImagesToCapture, - MCTFilename, MCTDate, - zStackEnabled, zStackMin, zStackMax, zStackStep, - xyScanEnabled, xScanMin, xScanMax, xScanStep, - yScanMin, yScanMax, yScanStep): - # this wil run 
in the background - self.nImagesTaken=0 - self.timeLast = 0 - if zStackEnabled: - nZStack = int(np.ceil((zStackMax-zStackMin)/zStackStep)) - else: - nZStack = 1 - # get current position - if self.positioner is not None: - currentPositions = self.positioner.getPosition() - self.initialPosition = (currentPositions["X"], currentPositions["Y"]) - self.initialPositionZ = currentPositions["Z"] - else: - self.initialPosition = (0,0) - self.initialPositionZ = 0 - - # HDF5 file setup: prepare data storage - fileExtension = "h5" - self.MCTFilePath = self.getSaveFilePath(date=MCTDate, - filename=MCTFilename, - extension=fileExtension) - self._logger.info(f"Saving to {self.MCTFilePath}") - self.detectorWidth, self.detectorHeight = self.detector._camera.SensorWidth, self.detector._camera.SensorHeight - if self.isRGB: - init_dims = (1, len(self.activeIlluminations), nZStack, self.detectorWidth, self.detectorHeight, 3) # time, channels, z, y, x, RGB - max_dims = (None, 3, nZStack, None, None, 3) # Allow unlimited time points and z slices - else: - init_dims = (1, len(self.activeIlluminations), nZStack, self.detectorWidth, self.detectorHeight) # time, channels, z, y, x - max_dims = (None, 3, nZStack, None, None) # Allow unlimited time points and z slices - - self.h5File = HDF5File(filename=self.MCTFilePath, init_dims=init_dims, max_dims=max_dims, isRGB=self.isRGB) - - # run as long as the MCT is active - while(self.isMCTrunning): - # stop measurement once done - if self.nImagesTaken >= nImagesToCapture: - self.isMCTrunning = False - self._logger.debug("Done with timelapse") - if not IS_HEADLESS: self._widget.mctStartButton.setEnabled(True) - break - - # initialize a run - if time.time() - self.timeLast >= (tperiod): - - # run an event - self.timeLast = time.time() # makes sure that the period is measured from launch to launch - - # reserve and free space for displayed stacks - self.LastStackIllu1 = [] - self.LastStackIllu2 = [] - self.LastStackLED = [] - - try: - ''' - AUTOFOCUS - ''' - self.performAutofocus() - - ''' - ACQUIRE CHANNELS, Z-STACKS, XY-SCANS - ''' - self.acquireCZXYScan() - - ''' - UPDATE GUI - ''' - self.updateGUI() - - #increase iterator - self.nImagesTaken += 1 - - except Exception as e: - self._logger.error("Thread closes with Error: "+str(e)) - self.isMCTrunning = False - self._logger.debug("Done with timelapse") - if not IS_HEADLESS: self._widget.mctStartButton.setEnabled(True) - return - - # pause to not overwhelm the CPU - time.sleep(0.1) - - - def updateGUI(self): - - # sneak images into arrays for displaying stack - if self.zStackEnabled and not self.xyScanEnabled: - self.LastStackIllu1ArrayLast = np.array(self.LastStackIllu1) - self.LastStackIllu2ArrayLast = np.array(self.LastStackIllu2) - self.LastStackLEDArrayLast = np.array(self.LastStackLED) - - if not IS_HEADLESS: - # update the text in the GUI - self._widget.setMessageGUI(self.nImagesTaken) - self._widget.mctShowLastButton.setEnabled(True) - - def performAutofocus(self): - if not IS_HEADLESS: autofocusParams = self._widget.getAutofocusValues() - else: return - if self.positioner is not None and self._widget.isAutofocus() and np.mod(self.nImagesTaken, int(autofocusParams['valuePeriod'])) == 0: - self._widget.setMessageGUI("Autofocusing...") - # turn on illuimination - self.activeIlluminations[0].setValue(autofocusParams["valueRange"]) - self.activeIlluminations[0].setEnabled(True) - time.sleep(self.tWait) - self.doAutofocus(autofocusParams) - self.switchOffIllumination() - - def acquireCZXYScan(self): - # precompute steps 
for xy scan - # snake scan - if self.xyScanEnabled: - xyScanStepsAbsolute = [] - xyScanIndices = [] - # we snake over y - fwdpath = np.arange(self.yScanMin, self.yScanMax, self.yScanStep) - bwdpath = np.flip(fwdpath) - # we increase linearly over x - for indexX, ix in enumerate(np.arange(self.xScanMin, self.xScanMax, self.xScanStep)): - if indexX%2==0: - for indexY, iy in enumerate(fwdpath): - xyScanStepsAbsolute.append([ix, iy]) - else: - for indexY, iy in enumerate(bwdpath): - xyScanStepsAbsolute.append([ix, iy]) - - # reserve space for tiled image - downScaleFactor = 4 - nTilesX = int(np.ceil((self.xScanMax-self.xScanMin)/self.xScanStep)) - nTilesY = int(np.ceil((self.yScanMax-self.yScanMin)/self.yScanStep)) - imageDimensions = self.detector.getLatestFrame().shape # self.detector._camera.CameraWidth TODO not good! - imageDimensionsDownscaled = (imageDimensions[1]//downScaleFactor, imageDimensions[0]//downScaleFactor) # Y/X - tiledImageDimensions = (nTilesX*imageDimensions[1]//downScaleFactor, nTilesY*imageDimensions[0]//downScaleFactor) - self.tiledImage = np.zeros(tiledImageDimensions) - - else: - xyScanStepsAbsolute = [[0,0]] - self.xScanMin = 0 - self.xScanMax = 0 - self.yScanMin = 0 - self.yScanMax = 0 - - - # precompute steps for z scan - if self.zStackEnabled: - zStepsAbsolute = np.arange(self.zStackMin, self.zStackMax, self.zStackStep) + self.initialPositionZ - else: - zStepsAbsolute = [self.initialPositionZ] - - - # in case something is not connected we want to reconnect! - # TODO: This should go into some function outside the MCT!!! - #if not ("IDENTIFIER_NAME" in self._master.UC2ConfigManager.ESP32.state.get_state() and self._master.UC2ConfigManager.ESP32.state.get_state()["IDENTIFIER_NAME"] == "uc2-esp"): - # mThread = threading.Thread(target=self._master.UC2ConfigManager.initSerial) - # mThread.start() - # mThread.join() - - # initialize xyz coordinates - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.xScanMin+self.initialPosition[0],self.yScanMin+self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - - # initialize iterator - - # iterate over all xy coordinates iteratively - - ''' - XY Scan - ''' - for ipos, iXYPos in enumerate(xyScanStepsAbsolute): - if not self.isMCTrunning: - break - # move to xy position is necessary - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(iXYPos[0]+self.initialPosition[0],iXYPos[1]+self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - - ''' - Z-stack - ''' - allZStackFrames = [] - for iZ in zStepsAbsolute: - # move to each position - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=iZ, axis="Z", is_absolute=True, is_blocking=True) - time.sleep(self.tWait) # unshake - - ''' - Illumination - ''' - # capture image for every illumination - allChannelFrames = [] - allPositions = [] - for illuIndex, mIllumination in enumerate(self.activeIlluminations): - if mIllumination.name==self.availableIlluminations[0].name: - illuValue = self.Illu1Value - elif mIllumination.name==self.availableIlluminations[1].name: - illuValue = self.Illu2Value - elif mIllumination.name==self.availableIlluminations[2].name: - illuValue = self.Illu3Value - - # change illumination - mIllumination.setValue(illuValue) - mIllumination.setEnabled(True) - - # always mmake sure we get a frame that is not the same as the one with illumination off eventually - timeoutFrameRequest = 1 # seconds # TODO: Make dependent on exposure 
time - cTime = time.time() - frameSync=3 - lastFrameNumber=-1 - while(1): - # get frame and frame number to get one that is newer than the one with illumination off eventually - mFrame, currentFrameNumber = self.detector.getLatestFrame(returnFrameNumber=True) - if lastFrameNumber==-1: - # first round - lastFrameNumber = currentFrameNumber - if time.time()-cTime> timeoutFrameRequest: - # in case exposure time is too long we need break at one point - break - if currentFrameNumber <= lastFrameNumber+frameSync: - time.sleep(0.01) # off-load CPU - else: - break - # store frames - allChannelFrames.append(mFrame) - mIllumination.setEnabled(False) - - # store positions - mPositions = self.positioner.getPosition() - allPositions.append((mPositions["X"], mPositions["Y"], mPositions["Z"])) - ''' - elif mIllumination=="LEDMatrix": - self.illu.setAll(1, (self.Illu3Value,self.Illu3Value,self.Illu3Value)) - time.sleep(self.tWait) - lastFrame = self.detector.getLatestFrame() - self.LastStackLED.append(lastFrame.copy()) - ''' - allZStackFrames.append(allChannelFrames) - - # ensure all illus are off - self.switchOffIllumination() - - # save to HDF5 - if self.isRGB: - framesToSave = np.transpose(np.array(allZStackFrames), (1,0,3,2,4)) # time, # todo check order!! - else: - framesToSave = np.transpose(np.array(allZStackFrames), (1,0,2,3)) # time, - self.h5File.append_data(self.nImagesTaken, framesToSave, np.array(allPositions)) - self._logger.debug(f"Saved image {self.nImagesTaken} to HDF5") - del framesToSave - - - # reduce backlash => increase chance to endup at the same position - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPositionZ), axis="Z", is_absolute=True, is_blocking=True) - - if self.xyScanEnabled: - # lets try to visualize each slice in napari - # def setImage(self, im, colormap="gray", name="", pixelsize=(1,1,1)): - # construct the tiled image - iX = int(np.floor((iXYPos[0]-self.xScanMin) // self.xScanStep)) - iY = int(np.floor((iXYPos[1]-self.yScanMin) // self.yScanStep)) - # handle rgb => turn to mono for now - ''' FIXME: This is currently not working - if len(lastFrame.shape)>2: - lastFrame = np.uint16(np.mean(lastFrame, 0)) - # add tile to large canvas - lastFrameScaled = cv2.resize(lastFrame, None, fx = 1/downScaleFactor, fy = 1/downScaleFactor, interpolation = cv2.INTER_NEAREST) - try: - self.tiledImage[int(iY*imageDimensionsDownscaled[1]):int(iY*imageDimensionsDownscaled[1]+imageDimensionsDownscaled[1]), - int(iX*imageDimensionsDownscaled[0]):int(iX*imageDimensionsDownscaled[0]+imageDimensionsDownscaled[0])] = lastFrameScaled - except Exception as e: - self._logger.error(e) - self._logger.error("Failed to parse a frame into the tiledImage array") - ''' - self.sigImageReceived.emit() # => displays image - - - # initialize xy coordinates - if self.xyScanEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPosition[0], self.initialPosition[1]), axis="XY", is_absolute=True, is_blocking=True) - if self.zStackEnabled and self.positioner is not None: - self.positioner.move(value=(self.initialPositionZ), axis="Z", is_absolute=True, is_blocking=True) - - - # disable motors to prevent overheating - if self.positioner is not None: - try:self.positioner.enalbeMotors(enable=self.positioner.is_enabled) - except: pass # special case for the ESP32 board - - def switchOffIllumination(self): - # switch off all illu sources - for mIllu in self.activeIlluminations: - mIllu.setEnabled(False) - mIllu.setValue(0) - 
time.sleep(0.1) - - def changeValueIlluSlider(self, currIllu, value): - allIllus = np.arange(len(self.availableIlluminations)) - # turn on current illumination - if not self.availableIlluminations[currIllu].enabled: self.availableIlluminations[currIllu].setEnabled(1) - self.availableIlluminations[currIllu].setValue(value) - - # switch off other illus - for illuIndex in allIllus: - if illuIndex != currIllu and self.availableIlluminations[illuIndex].power>0: - self.availableIlluminations[illuIndex].setValue(0) - self.availableIlluminations[illuIndex].setEnabled(0) - - def valueIllu1Changed(self, value): - # turn on current illumination based on slider value - currIllu = 0 - self.Illu1Value = value - self._widget.mctLabelIllu1.setText('Intensity (Laser 1):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def valueIllu2Changed(self, value): - currIllu = 1 - self.Illu2Value = value - self._widget.mctLabelIllu2.setText('Intensity (Laser 2):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def valueIllu3Changed(self, value): - currIllu = 2 - self.Illu3Value = value - self._widget.mctLabelIllu3.setText('Intensity (Laser 3):'+str(value)) - self.changeValueIlluSlider(currIllu, value) - - def __del__(self): - pass - - def getSaveFilePath(self, date, filename, extension): - mFilename = f"{date}_{filename}.{extension}" - dirPath = os.path.join(dirtools.UserFileDirs.Data, 'recordings', date) - newPath = os.path.join(dirPath,mFilename) - - if not os.path.exists(dirPath): - os.makedirs(dirPath) - - return newPath - - def setAutoFocusIsRunning(self, isRunning: bool): - # this is set by the AutofocusController once the AF is finished/initiated - self.isAutofocusRunning = isRunning - - def displayImage(self): - # a bit weird, but we cannot update outside the main thread - name = "tilescanning" - self._widget.setImage(np.uint16(self.tiledImage), colormap="gray", name=name, pixelsize=(1,1), translation=(0,0)) - - - # helper functions - def downscale_image(self, image, factor): - # Downscale the image - downscaled_image = transform.downscale_local_mean(image, (factor, factor)) - return downscaled_image - - def crop_center(self, image, size): - # Get the dimensions of the image - height, width = image.shape[:2] - - # Calculate the coordinates for cropping - start_x = max(0, int((width - size) / 2)) - start_y = max(0, int((height - size) / 2)) - end_x = min(width, start_x + size) - end_y = min(height, start_y + size) - - # Crop the image - cropped_image = image[start_y:end_y, start_x:end_x] - - return cropped_image - - -class HDF5File(object): - def __init__(self, filename, init_dims, max_dims=None, isRGB=False): - self.filename = filename - self.init_dims = init_dims # time, channels, z, y, x - self.max_dims = max_dims # time, channels, z, y, x - self.isRGB=isRGB - self.create_dataset() - - def create_dataset(self): - with h5py.File(self.filename, 'w') as file: - # Create a resizable dataset for the image data - dset = file.create_dataset('ImageData', shape=self.init_dims, maxshape=self.max_dims, dtype='uint16', compression="gzip") - - # Initialize a group for storing metadata - meta_group = file.create_group('Metadata') - - def append_data(self, timepoint, frame_data, xyz_coordinates): - with h5py.File(self.filename, 'a') as file: - dset = file['ImageData'] - meta_group = file['Metadata'] - - # Resize the dataset to accommodate the new timepoint - current_size = dset.shape[0] - dset.resize(current_size + 1, axis=0) - - # Add the new frame data - try: - if self.isRGB: - 
dset[current_size, :, :, :, :, :] = np.uint16(frame_data) - else: - dset[current_size, :, :, :, :] = np.uint16(frame_data) - except: - # in case X/Y are swapped - if self.isRGB: - dset[current_size, :, :, :, :, :] = np.transpose(np.uint16(frame_data), (0,1,2,4,3)) - else: - dset[current_size, :, :, :, :] = np.transpose(np.uint16(frame_data), (0,1,3,2)) - - # Add metadata for the new frame - for channel, xyz in enumerate(xyz_coordinates): - meta_group.create_dataset(f'Time_{timepoint}_Channel_{channel}', data=np.float32(xyz)) - - - -''' -Crosscolleration based drift correction - if False and not self.xyScanEnabled: - # treat images - imageStack = self.LastStackIllu2 # FIXME: Hardcoded - imageStack = self.LastStackLED # FIXME: Hardcoded - - driftCorrectionDownScaleFactor = 5 - driftCorrectionCropSize = 800 - iShift = [0,0] - imageList = [] - - # convert to list if necessary - if type(imageStack)!=list or len(imageStack)<2: - imageStack = list(imageStack) - - # image processing - for iImage in imageStack: - if len(iImage.shape)>2: - # if RGB => make mono - iImage = np.mean(iImage, -1) - image = self.crop_center(iImage, driftCorrectionCropSize) - image = self.downscale_image(image, driftCorrectionDownScaleFactor) - imageList.append(image) - - # remove background - imageList = np.array(imageList) - if len(imageList.shape)<3: - imageList = np.expand_dims(imageList,0) - imageList = imageList/ndi.filters.gaussian_filter(np.mean(imageList,0), 10) - - # Find max focus - bestFocus = 0 - bestFocusIndex = 0 - for index, image in enumerate(imageList): - # remove high frequencies - imagearraygf = ndi.filters.gaussian_filter(image, 3) - - # compute focus metric - focusValue = np.mean(ndi.filters.laplace(imagearraygf)) - if focusValue > bestFocus: - bestFocus = focusValue - bestFocusIndex = index - - # Align the images - image2 = np.std(imageList, (0)) - - #image2 = scipy.ndimage.gaussian_filter(image2, sigma=10) - if self.nImagesTaken > 0: - shift, error, diffphase = phase_cross_correlation(image1, image2) - iShift += (shift) - - # Shift image2 to align with image1 - image = imageList[bestFocusIndex] - #aligned_image = np.roll(image, int(iShift[1]), axis=1) - #aligned_image = np.roll(aligned_image,int(iShift[0]), axis=0) - self.positioner.move(value=(self.initialPosition[0]+shift[1], self.initialPosition[1]+shift[0]), axis="XY", is_absolute=True, is_blocking=True) - - image1 = image2.copy() - - #save values - #make sure not to have too large travelrange after last (e.g. initial position + 2*shift)) -''' -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
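Both copies of the removed controller build the XY tile list as a snake (boustrophedon) path: Y runs forward on even X columns and backward on odd ones, so consecutive tiles stay adjacent and the stage never sweeps back across the full range. A standalone sketch of that ordering, with made-up scan limits and step sizes:

```python
import numpy as np

def snake_xy_positions(x_min, x_max, x_step, y_min, y_max, y_step):
    """Return [x, y] pairs in snake order: the Y direction alternates
    between columns so consecutive positions remain neighbours."""
    fwd = np.arange(y_min, y_max, y_step)
    bwd = fwd[::-1]
    positions = []
    for ix, x in enumerate(np.arange(x_min, x_max, x_step)):
        ys = fwd if ix % 2 == 0 else bwd
        positions.extend([x, y] for y in ys)
    return positions

# Example (units: micrometers, values hypothetical):
# columns at x = 0, 100, 200; y runs 0->100, then 100->0, then 0->100
tiles = snake_xy_positions(0, 300, 100, 0, 200, 100)
```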
diff --git a/imswitch/imcontrol/controller/controllers/MetadataController.py b/imswitch/imcontrol/controller/controllers/MetadataController.py new file mode 100644 index 000000000..2bdb84602 --- /dev/null +++ b/imswitch/imcontrol/controller/controllers/MetadataController.py @@ -0,0 +1,307 @@ +""" +MetadataController - API endpoints for the MetadataHub and InstrumentMetadataManager. + +Provides REST API endpoints to: +- Query current metadata state +- Get instrument information +- Access detector contexts +- Get frame events +""" + +from typing import Any, Dict, List, Optional +import json + +from imswitch.imcommon.model import APIExport, initLogger +from ..basecontrollers import ImConWidgetController + + +class MetadataController(ImConWidgetController): + """ + Controller providing API access to MetadataHub and InstrumentMetadataManager. + + This controller exposes the metadata state via REST API endpoints, + enabling external tools and frontends to visualize the metadata. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__logger = initLogger(self) + + # Get references to metadata managers from MasterController + self._metadata_hub = getattr(self._master, 'metadataHub', None) + self._instrument_manager = getattr(self._master, 'instrumentMetadataManager', None) + + if self._metadata_hub is None: + self.__logger.warning("MetadataHub not available") + else: + self.__logger.info("MetadataController initialized with MetadataHub") + + if self._instrument_manager is None: + self.__logger.warning("InstrumentMetadataManager not available") + + # === Global Metadata API === + + @APIExport() + def getMetadataSnapshot(self, flat: bool = False, category: str = None) -> Dict[str, Any]: + """ + Get a snapshot of the current global metadata state. + + Args: + flat: If True, return flat dict with ':' separated keys + category: Optional category filter (e.g., "Positioner", "Illumination") + + Returns: + Dictionary containing current metadata values with timestamps + """ + if self._metadata_hub is None: + return {"error": "MetadataHub not available"} + + return self._metadata_hub.get_latest(flat=flat, filter_category=category) + + @APIExport() + def getMetadataCategories(self) -> List[str]: + """ + Get list of available metadata categories. + + Returns: + List of category names (e.g., ["Positioner", "Illumination", "Detector"]) + """ + if self._metadata_hub is None: + return [] + + metadata = self._metadata_hub.get_latest(flat=True) + categories = set() + for key in metadata.keys(): + parts = key.split(':') + if parts: + categories.add(parts[0]) + return sorted(list(categories)) + + @APIExport() + def getMetadataJSON(self) -> str: + """ + Get metadata as JSON string for external consumption. + + Returns: + JSON string of current metadata state + """ + if self._metadata_hub is None: + return json.dumps({"error": "MetadataHub not available"}) + + return self._metadata_hub.to_json() + + # === Detector Context API === + + @APIExport() + def getDetectorContext(self, detectorName: str) -> Dict[str, Any]: + """ + Get the metadata context for a specific detector. + + Args: + detectorName: Name of the detector + + Returns: + Dictionary with detector context (shape, pixel size, exposure, etc.) 
+ """ + if self._metadata_hub is None: + return {"error": "MetadataHub not available"} + + ctx = self._metadata_hub.get_detector(detectorName) + if ctx is None: + return {"error": f"Detector '{detectorName}' not registered"} + + return ctx.to_dict() + + @APIExport() + def getAllDetectorContexts(self) -> Dict[str, Dict[str, Any]]: + """ + Get metadata contexts for all registered detectors. + + Returns: + Dictionary mapping detector names to their contexts + """ + if self._metadata_hub is None: + return {"error": "MetadataHub not available"} + + return self._metadata_hub.export_detector_contexts() + + # === Frame Events API === + + @APIExport() + def getFrameEvents(self, detectorName: str, maxEvents: int = 100) -> List[Dict[str, Any]]: + """ + Get recent frame events for a detector. + + Args: + detectorName: Name of the detector + maxEvents: Maximum number of events to return + + Returns: + List of frame event dictionaries + """ + if self._metadata_hub is None: + return [] + + events = self._metadata_hub.get_frame_events(detectorName, limit=maxEvents) + return [e.to_dict() for e in events] + + @APIExport() + def getLatestFrameEvent(self, detectorName: str) -> Dict[str, Any]: + """ + Get the most recent frame event for a detector. + + Args: + detectorName: Name of the detector + + Returns: + Dictionary with latest frame event or empty dict if none + """ + if self._metadata_hub is None: + return {} + + events = self._metadata_hub.get_frame_events(detectorName, limit=1) + if events: + return events[0].to_dict() + return {} + + # === Instrument Metadata API === + + @APIExport() + def getInstrumentInfo(self) -> Dict[str, Any]: + """ + Get complete instrument metadata (microscope configuration). + + Returns: + Dictionary with instrument info including UC2 components, filters, etc. + """ + if self._instrument_manager is None: + return {"error": "InstrumentMetadataManager not available"} + + return self._instrument_manager.instrument_info.to_dict() + + @APIExport() + def getOMEInstrument(self) -> Dict[str, Any]: + """ + Get instrument metadata formatted for OME-types. + + Returns: + Dictionary compatible with ome_types.model.Instrument + """ + if self._instrument_manager is None: + return {"error": "InstrumentMetadataManager not available"} + + return self._instrument_manager.get_ome_instrument_dict() + + @APIExport() + def getInstrumentComponents(self) -> List[Dict[str, Any]]: + """ + Get list of UC2 optical components. + + Returns: + List of component dictionaries + """ + if self._instrument_manager is None: + return [] + + from dataclasses import asdict + return [asdict(c) for c in self._instrument_manager.instrument_info.components] + + @APIExport() + def getInstrumentFilters(self) -> List[Dict[str, Any]]: + """ + Get list of optical filters. + + Returns: + List of filter dictionaries + """ + if self._instrument_manager is None: + return [] + + from dataclasses import asdict + return [asdict(f) for f in self._instrument_manager.instrument_info.filters] + + @APIExport() + def loadUC2OptiKitConfig(self, configPath: str) -> bool: + """ + Load UC2 OptiKit configuration from a JSON file. + + Args: + configPath: Path to UC2 OptiKit JSON configuration file + + Returns: + True if loaded successfully + """ + if self._instrument_manager is None: + return False + + return self._instrument_manager.load_uc2_optikit_config(configPath) + + @APIExport() + def setFirmwareVersion(self, version: str) -> None: + """ + Set the firmware version string. 
+ + Args: + version: Firmware version string + """ + if self._instrument_manager is not None: + self._instrument_manager.set_firmware_version(version) + + @APIExport() + def setTubeLens(self, focalLengthMm: float, magnification: float = 1.0) -> None: + """ + Set tube lens parameters. + + Args: + focalLengthMm: Focal length in millimeters + magnification: Tube lens magnification factor + """ + if self._instrument_manager is not None: + self._instrument_manager.set_tube_lens(focalLengthMm, magnification) + + # === Shared Attributes Integration === + + @APIExport() + def getSharedAttributes(self) -> Dict[str, Any]: + """ + Get the shared attributes from the communication channel. + + Returns: + Dictionary of shared attributes + """ + try: + return json.loads(self._commChannel.sharedAttrs.getJSON()) + except Exception as e: + self.__logger.error(f"Error getting shared attributes: {e}") + return {} + + @APIExport() + def getSharedAttributesFlat(self) -> Dict[str, Any]: + """ + Get shared attributes in flat format (HDF5 style). + + Returns: + Dictionary with ':' separated keys + """ + try: + return self._commChannel.sharedAttrs.getSharedAttributes() + except Exception as e: + self.__logger.error(f"Error getting shared attributes: {e}") + return {} + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/controller/controllers/MotCorrController.py b/imswitch/imcontrol/controller/controllers/MotCorrController.py deleted file mode 100644 index be853a293..000000000 --- a/imswitch/imcontrol/controller/controllers/MotCorrController.py +++ /dev/null @@ -1,24 +0,0 @@ -from ..basecontrollers import ImConWidgetController - - -class MotCorrController(ImConWidgetController): - """ Linked to MotCorrWidget.""" - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._manager = self._master.standManager - - self._widget.motcorrControl.slider.valueChanged[int].connect(self.changeSlider) - self._widget.motcorrControl.setPointEdit.returnPressed.connect(self.changeEdit) - - def changeSlider(self, _): - """ Called when the slider is moved, sets the motorized correction collar position to value.""" - self._manager.motCorrPos(self._widget.motcorrControl.slider.value()) - self._widget.motcorrControl.setPointEdit.setText(str(self._widget.motcorrControl.slider.value())) - - def changeEdit(self): - """ Called when the user manually changes the position value of the - correction collar. 
Sets the position to the corresponding value.""" - self._manager.motCorrPos(float(self._widget.motcorrControl.setPointEdit.text())) - self._widget.motcorrControl.slider.setValue(float(self._widget.motcorrControl.setPointEdit.text())) - diff --git a/imswitch/imcontrol/controller/controllers/ObjectiveController.py b/imswitch/imcontrol/controller/controllers/ObjectiveController.py index a7d0c9a6b..83fc9e310 100644 --- a/imswitch/imcontrol/controller/controllers/ObjectiveController.py +++ b/imswitch/imcontrol/controller/controllers/ObjectiveController.py @@ -291,6 +291,7 @@ def _updatePixelSize(self): """ Update pixel size in detector based on current objective. Internal method called after objective changes. + Also updates SharedAttributes for metadata tracking. """ if self._currentObjective is None or self._currentObjective not in [0, 1]: return @@ -305,6 +306,9 @@ def _updatePixelSize(self): self.detector.setPixelSizeUm(pixelsize) self._logger.debug(f"Updated detector pixel size to {pixelsize} µm/px") + # Update SharedAttributes for metadata tracking + self._updateSharedAttrs() + def _onConfigParametersChanged(self, slot: int, params: dict): """ Handle objective parameters changed signal from config manager. @@ -602,6 +606,38 @@ def getstatus(self): return status + # === SharedAttributes for metadata tracking === + + def setSharedAttr(self, attr, value): + """Set a shared attribute for metadata tracking.""" + self._commChannel.sharedAttrs[(_attrCategory, attr)] = value + + def _updateSharedAttrs(self): + """Update all shared attributes with current objective state.""" + if self._currentObjective is None: + return + + # Update objective metadata for the metadata hub + self.setSharedAttr(_nameAttr, self._getCurrentObjectiveName()) + self.setSharedAttr(_magnificationAttr, self._getCurrentMagnification()) + self.setSharedAttr(_naAttr, self._getCurrentNA()) + self.setSharedAttr(_pixelSizeAttr, self._getCurrentPixelSize()) + self.setSharedAttr(_slotAttr, self._currentObjective) + + fov = self._getCurrentFOV() + if fov: + self.setSharedAttr(_fovUmAttr, fov) + + +# Metadata key constants for SharedAttributes +_attrCategory = 'Objective' +_nameAttr = 'Name' +_magnificationAttr = 'Magnification' +_naAttr = 'NA' +_pixelSizeAttr = 'PixelSizeUm' +_slotAttr = 'TurretIndex' +_fovUmAttr = 'FOVUm' + # Copyright (C) 2020-2024 ImSwitch developers # This file is part of ImSwitch. 
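The ObjectiveController change above publishes the active objective under an 'Objective' category in SharedAttributes so the metadata bridge can pick it up alongside detector and positioner state. The tuple keys written by `_updateSharedAttrs` correspond to the constants defined at the bottom of the file; the values below are hypothetical examples, not taken from a real configuration:

```python
# Hypothetical snapshot of what _updateSharedAttrs publishes for a 20x/0.4 objective
# in turret slot 0; keys are the (category, field) tuples used by setSharedAttr above.
objective_shared_attrs = {
    ("Objective", "Name"): "20x Plan Achromat",
    ("Objective", "Magnification"): 20.0,
    ("Objective", "NA"): 0.4,
    ("Objective", "PixelSizeUm"): 0.325,      # camera pixel pitch / magnification
    ("Objective", "TurretIndex"): 0,
    ("Objective", "FOVUm"): (665.6, 665.6),   # assumed 2048 px sensor * 0.325 um/px
}

# A consumer holding such a snapshot can derive per-image scaling, e.g.:
px_um = objective_shared_attrs[("Objective", "PixelSizeUm")]
fov_x_um = 2048 * px_um  # 665.6 um for the assumed sensor width
```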
diff --git a/imswitch/imcontrol/controller/controllers/PositionerController.py b/imswitch/imcontrol/controller/controllers/PositionerController.py index a318f6d8a..7e4b2d12d 100644 --- a/imswitch/imcontrol/controller/controllers/PositionerController.py +++ b/imswitch/imcontrol/controller/controllers/PositionerController.py @@ -37,15 +37,14 @@ def __init__(self, *args, **kwargs): self.setSharedAttr(pName, axis, _stopAttr, pManager.stop[axis]) # Connect CommunicationChannel signals - if 0: #IS_HEADLESS:IS_HEADLESS: - self._commChannel.sharedAttrs.sigAttributeSet.connect(self.attrChanged, check_nargs=False) - else: - self._commChannel.sharedAttrs.sigAttributeSet.connect(self.attrChanged) - + self._commChannel.sharedAttrs.sigAttributeSet.connect(self.attrChanged) + + # Connect position update signal - this updates shared attributes for all modes + # This is the primary mechanism for keeping metadata in sync with hardware state + self._commChannel.sigUpdateMotorPosition.connect(self._onMotorPositionUpdate) # Connect PositionerWidget signals if not IS_HEADLESS: - self._commChannel.sigUpdateMotorPosition.connect(self.updateAllPositionGUI) # force update position in GUI self._widget.sigStepUpClicked.connect(self.stepUp) self._widget.sigStepDownClicked.connect(self.stepDown) self._widget.sigStepAbsoluteClicked.connect(self.moveAbsolute) @@ -68,7 +67,11 @@ def getSpeed(self): return self._master.positionersManager.execOnAll(lambda p: p.speed) def move(self, positionerName, axis, dist, isAbsolute=None, isBlocking=False, speed=None): - """ Moves positioner by dist micrometers in the specified axis. """ + """ Moves positioner by dist micrometers in the specified axis. + + For non-blocking moves, the position will be updated asynchronously via + sigUpdateMotorPosition signal from the positioner manager. + """ if positionerName is None or positionerName == "" or positionerName not in self._master.positionersManager: positionerName = self._master.positionersManager.getAllDeviceNames()[0] @@ -83,6 +86,10 @@ def move(self, positionerName, axis, dist, isAbsolute=None, isBlocking=False, sp speed = 5000 # FIXME: default speed for headless mode # set speed for the positioner self.setSpeed(positionerName=positionerName, speed=speed, axis=axis) + + # Set "IsMoving" attribute before move starts + self.setSharedAttr(positionerName, axis, _isMovingAttr, True) + try: # special case for UC2 positioner that takes more arguments self._master.positionersManager[positionerName].move(dist, axis, isAbsolute, isBlocking) @@ -93,9 +100,12 @@ def move(self, positionerName, axis, dist, isAbsolute=None, isBlocking=False, sp # if the positioner does not have the move method, use the default move method self._logger.error(e) self._master.positionersManager[positionerName].move(dist, axis) - if isBlocking: # push signal immediately + + if isBlocking: + # For blocking moves, update position and metadata immediately self._commChannel.sigUpdateMotorPosition.emit(self.getPos()) - #self.updatePosition(positionerName, axis) + self.setSharedAttr(positionerName, axis, _isMovingAttr, False) + # For non-blocking moves, position will be updated via sigUpdateMotorPosition signal def moveForever(self, positionerName: str=None, axis="X", speed=0, is_stop:bool=False): """ Moves positioner forever. 
""" @@ -127,23 +137,44 @@ def setSpeed(self, positionerName, axis, speed=(1000, 1000, 1000)): self.setSharedAttr(positionerName, axis, _speedAttr, speed) if not IS_HEADLESS: self._widget.setSpeedSize(positionerName, axis, speed) - def updateAllPositionGUI(self): - # update all positions for all axes in GUI + def _onMotorPositionUpdate(self, positionData: Dict = None): + """ + Handler for sigUpdateMotorPosition signal. + Updates shared attributes and GUI for all positioners. + + This is the central point where motor positions are synced to the metadata system. + Called both from blocking moves and asynchronous position updates from hardware. + + Args: + positionData: Optional position data dict. If None, positions are read from managers. + """ for positionerName in self._master.positionersManager.getAllDeviceNames(): - for axis in self._master.positionersManager[positionerName].axes: + positioner = self._master.positionersManager[positionerName] + for axis in positioner.axes: self.updatePosition(positionerName, axis) - self.updateSpeed(positionerName, axis) + # Also update speed if available + if hasattr(positioner, 'speed'): + for axis in positioner.axes: + self.updateSpeed(positionerName, axis) + + def updateAllPositionGUI(self): + """Legacy method - calls _onMotorPositionUpdate for backwards compatibility.""" + self._onMotorPositionUpdate() def updatePosition(self, positionerName, axis): + """Update position for a single axis and sync to shared attributes.""" if axis == "XY": - for axis in (("X", "Y")): - newPos = self._master.positionersManager[positionerName].position[axis] - self.setSharedAttr(positionerName, axis, _positionAttr, newPos) - if not IS_HEADLESS: self._widget.updatePosition(positionerName, axis, newPos) + # Handle combined XY axis by updating both X and Y + for single_axis in ("X", "Y"): + newPos = self._master.positionersManager[positionerName].position[single_axis] + self.setSharedAttr(positionerName, single_axis, _positionAttr, newPos) + if not IS_HEADLESS: + self._widget.updatePosition(positionerName, single_axis, newPos) else: newPos = self._master.positionersManager[positionerName].position[axis] self.setSharedAttr(positionerName, axis, _positionAttr, newPos) - if not IS_HEADLESS: self._widget.updatePosition(positionerName, axis, newPos) + if not IS_HEADLESS: + self._widget.updatePosition(positionerName, axis, newPos) def updateSpeed(self, positionerName, axis): newSpeed = self._master.positionersManager[positionerName].speed[axis] @@ -187,13 +218,10 @@ def setXYPosition(self, x, y): positionerY = self.getPositionerNames()[1] self.__logger.debug(f"Move {positionerX}, axis X, dist {str(x)}") self.__logger.debug(f"Move {positionerY}, axis Y, dist {str(y)}") - # self.move(positionerX, 'X', x) - # self.move(positionerY, 'Y', y) def setZPosition(self, z): positionerZ = self.getPositionerNames()[2] self.__logger.debug(f"Move {positionerZ}, axis Z, dist {str(z)}") - # self.move(self.getPositionerNames[2], 'Z', z) @APIExport(runOnUIThread=True) def enalbeMotors(self, enable=None, enableauto=None): @@ -429,6 +457,7 @@ def moveToSampleLoadingPosition(self, positionerName=None, speed=10000, is_block _speedAttr = "Speed" _homeAttr = "Home" _stopAttr = "Stop" +_isMovingAttr = "IsMoving" # Copyright (C) 2020-2024 ImSwitch developers # This file is part of ImSwitch. 
diff --git a/imswitch/imcontrol/controller/controllers/RecordingController.py b/imswitch/imcontrol/controller/controllers/RecordingController.py index 6e8b1eb17..fa90ed1d1 100644 --- a/imswitch/imcontrol/controller/controllers/RecordingController.py +++ b/imswitch/imcontrol/controller/controllers/RecordingController.py @@ -66,9 +66,7 @@ def __init__(self, *args, **kwargs): self._widget.setSnapSaveModeVisible(self._setupInfo.hasWidget("Image")) self._widget.setRecSaveMode(SaveMode.Disk.value) - self._widget.setRecSaveModeVisible( - self._moduleCommChannel.isModuleRegistered("imreconstruct") - ) + # Connect RecordingWidget signals self._widget.sigDetectorModeChanged.connect(self.detectorChanged) @@ -124,14 +122,14 @@ def snap(self, name=None, mSaveFormat=None) -> dict: savename = os.path.join(folder, self.getFileName() + "_" + name) attrs = { - detectorName: self._commChannel.sharedAttrs.getHDF5Attributes() + detectorName: self._get_detector_attrs(detectorName) for detectorName in detectorNames } if not IS_HEADLESS: saveMode = SaveMode(self._widget.getSnapSaveMode()) else: - saveMode = SaveMode(1) # TODO: Assuming we want to save the image + saveMode = SaveMode.Disk # TODO: Assuming we want to save the image self._master.recordingManager.snap( detectorNames, savename, saveMode, mSaveFormat, attrs ) @@ -141,7 +139,7 @@ def snapNumpy(self): self.updateRecAttrs(isSnapping=True) detectorNames = self.getDetectorNamesToCapture() attrs = { - detectorName: self._commChannel.sharedAttrs.getHDF5Attributes() + detectorName: self._get_detector_attrs(detectorName) for detectorName in detectorNames } @@ -168,7 +166,7 @@ def snapImagePrev(self, *args): time.sleep(0.01) savename = os.path.join(folder, self.getFileName()) + "_snap_" + suffix - attrs = {detectorName: self._commChannel.sharedAttrs.getHDF5Attributes()} + attrs = {detectorName: self._get_detector_attrs(detectorName)} self._master.recordingManager.snapImagePrev( detectorName, @@ -201,7 +199,7 @@ def toggleREC(self, checked): "saveMode": SaveMode(self._widget.getRecSaveMode()), "saveFormat": SaveFormat(self._widget.getsaveFormat()), "attrs": { - detectorName: self._commChannel.sharedAttrs.getHDF5Attributes() + detectorName: self._get_detector_attrs(detectorName) for detectorName in detectorsBeingCaptured }, "singleMultiDetectorFile": ( @@ -254,7 +252,7 @@ def nextLapse(self): if isFirstLapse: self._commChannel.sigScanStarting.emit() # To get updated values from sharedAttrs self.recordingArgs["attrs"] = { # Update - detectorName: self._commChannel.sharedAttrs.getHDF5Attributes() + detectorName: self._get_detector_attrs(detectorName) for detectorName in self.recordingArgs["detectorNames"] } self.recordingArgs["recFrames"] = ( @@ -385,6 +383,55 @@ def getFileName(self): filename = time.strftime("%Hh%Mm%Ss") return filename + def _get_detector_attrs(self, detector_name): + """ + Get attributes for a detector, combining SharedAttrs and MetadataHub. 
+ + Args: + detector_name: Name of the detector + + Returns: + Dictionary of attributes for the detector + """ + # Start with SharedAttrs (backwards compatible) + attrs = self._commChannel.sharedAttrs.getSharedAttributes() # TODO: HDF5 doesn't exist anymore + + # Add MetadataHub snapshot if available + if hasattr(self._master, 'metadataHub') and self._master.metadataHub is not None: + try: + import json + + # Get global metadata snapshot + global_snapshot = self._master.metadataHub.snapshot_global() + + # Get detector-specific snapshot + detector_snapshot = self._master.metadataHub.snapshot_detector(detector_name) + + # Serialize and add to attrs + if global_snapshot: + attrs['_metadata_hub_global'] = json.dumps(global_snapshot, default=str) + + if detector_snapshot: + # Add detector context + if 'detector_context' in detector_snapshot: + ctx = detector_snapshot['detector_context'] + attrs[f'{detector_name}:pixel_size_um'] = ctx.get('pixel_size_um') + attrs[f'{detector_name}:shape_px'] = json.dumps(ctx.get('shape_px')) + attrs[f'{detector_name}:fov_um'] = json.dumps(ctx.get('fov_um')) + if ctx.get('exposure_ms') is not None: + attrs[f'{detector_name}:exposure_ms'] = ctx.get('exposure_ms') + if ctx.get('gain') is not None: + attrs[f'{detector_name}:gain'] = ctx.get('gain') + + # Add detector-specific metadata + if 'metadata' in detector_snapshot: + for key, value_dict in detector_snapshot['metadata'].items(): + attrs[f'{detector_name}:hub:{key}'] = value_dict.get('value') + except Exception as e: + self.__logger.warning(f"Error getting metadata hub snapshot: {e}") + + return attrs + def attrChanged(self, key, value): if ( self.settingAttr @@ -774,7 +821,7 @@ def startRecording(self, mSaveFormat: int = SaveFormat.TIFF) -> None: "saveMode": SaveMode(1), # Disk "saveFormat": mSaveFormat, # TIFF "attrs": { - detectorName: self._commChannel.sharedAttrs.getHDF5Attributes() + detectorName: self._commChannel.sharedAttrs.getSharedAttributes() # TODO: HDF5 doesn't exist anymore for detectorName in detectorsBeingCaptured }, } diff --git a/imswitch/imcontrol/controller/controllers/ScanManagerMoNaLISA.py b/imswitch/imcontrol/controller/controllers/ScanManagerMoNaLISA.py deleted file mode 100644 index 81b296ab2..000000000 --- a/imswitch/imcontrol/controller/controllers/ScanManagerMoNaLISA.py +++ /dev/null @@ -1,57 +0,0 @@ -from .ScanManagerBase import SuperScanManager - - -class ScanManagerMoNaLISA(SuperScanManager): - """ ScanManager helps with generating signals for scanning. """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @property - def TTLTimeUnits(self): - self._checkScanDefined() - return self._TTLCycleDesigner.timeUnits - - def makeFullScan(self, scanParameters, TTLParameters, staticPositioner=False): - """ Generates stage and TTL scan signals. """ - self._checkScanDefined() - - if not staticPositioner: - scanSignalsDict, positions, scanInfoDict = self.getScanSignalsDict(scanParameters) - if not self._scanDesigner.checkSignalComp( - scanParameters, self._setupInfo, scanInfoDict - ): - self._logger.error( - 'Signal voltages outside scanner ranges: try scanning a smaller ROI or a slower' - ' scan.' 
- ) - return - - TTLCycleSignalsDict = self.getTTLCycleSignalsDict(TTLParameters, scanInfoDict) - else: - TTLCycleSignalsDict = self.getTTLCycleSignalsDict(TTLParameters) - scanSignalsDict = {} - scanInfoDict = {} - - return ( - {'scanSignalsDict': scanSignalsDict, - 'TTLCycleSignalsDict': TTLCycleSignalsDict}, - scanInfoDict - ) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/SetupInfo.py b/imswitch/imcontrol/model/SetupInfo.py index e30b7c67a..95b0c139b 100644 --- a/imswitch/imcontrol/model/SetupInfo.py +++ b/imswitch/imcontrol/model/SetupInfo.py @@ -242,9 +242,6 @@ class ObjectiveInfo: calibrateOnStart: bool = True active: bool = True -@dataclass(frozen=False) -class MCTInfo: - tWait: int class ROIScanInfo: pass @@ -291,6 +288,60 @@ class FlowStopInfo: class LepmonInfo: pass + +@dataclass(frozen=False) +class InstrumentInfo: + """ + Microscope instrument metadata for OME-types integration. + + This configuration is loaded from the setup JSON and provides + static instrument information that doesn't change during operation. + Dynamic values (firmware version, etc.) are updated at runtime. + """ + + # Microscope identification + name: str = "openUC2 Microscope" + """ Instrument display name. """ + + microscopeType: str = "Inverted" + """ Microscope type: 'Inverted', 'Upright', or 'Other'. """ + + manufacturer: str = "openUC2" + """ Instrument manufacturer. """ + + model: str = "UC2 Frame" + """ Instrument model. """ + + serialNumber: str = "" + """ Instrument serial number. """ + + # Optical configuration + tubeLensFocalLengthMm: float = 180.0 + """ Tube lens focal length in millimeters. Standard Nikon = 180mm. """ + + tubeLensMagnification: float = 1.0 + """ Tube lens magnification factor. """ + + # UC2 specific metadata + uc2FrameName: str = "" + """ Name of the UC2 frame configuration. """ + + uc2FrameAuthor: str = "" + """ Author of the UC2 frame configuration. """ + + uc2FrameVersion: str = "1.0.0" + """ Version of the UC2 frame configuration. """ + + uc2Verified: bool = False + """ Whether the UC2 frame is verified. """ + + uc2OptiKitConfigPath: Optional[str] = None + """ Path to UC2 OptiKit JSON configuration file. """ + + # Filter configuration (static filters defined in setup) + filters: List[Dict[str, Any]] = field(default_factory=list) + """ List of optical filters. Each entry: {name, filterType, wavelengthNm, bandwidthNm}. """ + @dataclass(frozen=False) class FlatfieldInfo: pass @@ -663,9 +714,6 @@ class SetupInfo: objective: Optional[ObjectiveInfo] = field(default_factory=lambda: None) """ Objective settings. Required to be defined to use Objective functionality. """ - mct: Optional[MCTInfo] = field(default_factory=lambda: None) - """ MCT settings. Required to be defined to use MCT functionality. """ - nidaq: NidaqInfo = field(default_factory=NidaqInfo) """ NI-DAQ settings. 
""" @@ -673,7 +721,7 @@ class SetupInfo: """ ROIScan settings. Required to be defined to use ROIScan functionality. """ lightsheet: Optional[LightsheetInfo] = field(default_factory=lambda: None) - """ MCT settings. Required to be defined to use Lightsheet functionality. """ + """ lighthseet settings. Required to be defined to use Lightsheet functionality. """ webrtc: Optional[WebRTCInfo] = field(default_factory=lambda: None) """ WebRTC settings. Required to be defined to use WebRTC functionality. """ @@ -748,6 +796,10 @@ class SetupInfo: storage: Optional[StorageInfo] = field(default_factory=lambda: None) """ Storage configuration for data paths. Contains persistent storage settings. """ + instrument: Optional[InstrumentInfo] = field(default_factory=lambda: None) + """ Instrument metadata for OME-types integration. Contains microscope identification, + optical configuration, and UC2-specific metadata. """ + nidaq: NidaqInfo = field(default_factory=NidaqInfo) """ NI-DAQ settings. """ diff --git a/imswitch/imcontrol/model/__init__.py b/imswitch/imcontrol/model/__init__.py index dc4fd4903..2eec4ca51 100644 --- a/imswitch/imcontrol/model/__init__.py +++ b/imswitch/imcontrol/model/__init__.py @@ -5,5 +5,4 @@ from .signaldesigners import SignalDesignerFactory import sys -#sys.modules['visa'] = 'pyvisa' diff --git a/imswitch/imcontrol/model/interfaces/CameraWebcam.py b/imswitch/imcontrol/model/interfaces/CameraWebcam.py index 4861bde1b..3ab923a37 100644 --- a/imswitch/imcontrol/model/interfaces/CameraWebcam.py +++ b/imswitch/imcontrol/model/interfaces/CameraWebcam.py @@ -96,7 +96,7 @@ def flushBuffer(self): return def getLastChunk(self): - # get frames from camera'S buffer => e.g. for Hdf5 saving + # get frames from camera'S buffer frame = self.camera.read() frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) chunk = np.expand_dims(frame, 0) diff --git a/imswitch/imcontrol/model/managers/ExperimentManager_new.py b/imswitch/imcontrol/model/managers/ExperimentManager_new.py deleted file mode 100644 index ee9511186..000000000 --- a/imswitch/imcontrol/model/managers/ExperimentManager_new.py +++ /dev/null @@ -1,130 +0,0 @@ -from imswitch.imcommon.framework import Signal, SignalInterface -from imswitch.imcommon.model import initLogger - -class ExperimentManager(SignalInterface): - - def __init__(self, experimentInfo, *args, **kwargs): - self.sigExperimentMaskUpdated = Signal(object) # (maskCombined) # (maskCombined) - super().__init__(*args, **kwargs) - self.__logger = initLogger(self) - - # Initialize OMERO configuration from experimentInfo (similar to MCTManager pattern) - if experimentInfo is not None: - # OMERO configuration from setup - self.omeroServerUrl = getattr(experimentInfo, 'omeroServerUrl', "localhost") - self.omeroUsername = getattr(experimentInfo, 'omeroUsername', "") - self.omeroPassword = getattr(experimentInfo, 'omeroPassword', "") - self.omeroPort = getattr(experimentInfo, 'omeroPort', 4064) - self.omeroGroupId = getattr(experimentInfo, 'omeroGroupId', -1) - self.omeroProjectId = getattr(experimentInfo, 'omeroProjectId', -1) - self.omeroDatasetId = getattr(experimentInfo, 'omeroDatasetId', -1) - self.omeroEnabled = getattr(experimentInfo, 'omeroEnabled', False) - self.omeroConnectionTimeout = getattr(experimentInfo, 'omeroConnectionTimeout', 30) - self.omeroUploadTimeout = getattr(experimentInfo, 'omeroUploadTimeout', 300) - self.__logger.info("OMERO configuration loaded from setup info") - else: - # Default values if no experimentInfo provided - self.omeroServerUrl = "localhost" - 
self.omeroUsername = "" - self.omeroPassword = "" - self.omeroPort = 4064 - self.omeroGroupId = -1 - self.omeroProjectId = -1 - self.omeroDatasetId = -1 - self.omeroEnabled = False - self.omeroConnectionTimeout = 30 - self.omeroUploadTimeout = 300 - self.__logger.info("OMERO configuration initialized with defaults") - - # General timing parameter (similar to MCTManager's tWait) - self.tWait = 0.1 - - self.update() - - def update(self): - return None - - def getOmeroConfig(self): - """Get the current OMERO configuration as a dictionary.""" - return { - "serverUrl": self.omeroServerUrl, - "username": self.omeroUsername, - "password": self.omeroPassword, - "port": self.omeroPort, - "groupId": self.omeroGroupId, - "projectId": self.omeroProjectId, - "datasetId": self.omeroDatasetId, - "isEnabled": self.omeroEnabled, - "connectionTimeout": self.omeroConnectionTimeout, - "uploadTimeout": self.omeroUploadTimeout - } - - def setOmeroConfig(self, config_dict): - """Set OMERO configuration from a dictionary.""" - if "serverUrl" in config_dict: - self.omeroServerUrl = config_dict["serverUrl"] - if "username" in config_dict: - self.omeroUsername = config_dict["username"] - if "password" in config_dict: - self.omeroPassword = config_dict["password"] - if "port" in config_dict: - self.omeroPort = config_dict["port"] - if "groupId" in config_dict: - self.omeroGroupId = config_dict["groupId"] - if "projectId" in config_dict: - self.omeroProjectId = config_dict["projectId"] - if "datasetId" in config_dict: - self.omeroDatasetId = config_dict["datasetId"] - if "isEnabled" in config_dict: - self.omeroEnabled = config_dict["isEnabled"] - if "connectionTimeout" in config_dict: - self.omeroConnectionTimeout = config_dict["connectionTimeout"] - if "uploadTimeout" in config_dict: - self.omeroUploadTimeout = config_dict["uploadTimeout"] - - self.__logger.info("OMERO configuration updated") - - def isOmeroEnabled(self): - """Check if OMERO integration is enabled.""" - return self.omeroEnabled - - def getOmeroConnectionParams(self): - """Get OMERO connection parameters as a dictionary.""" - if not self.isOmeroEnabled(): - return None - - return { - "serverUrl": self.omeroServerUrl, - "username": self.omeroUsername, - "password": self.omeroPassword, - "port": self.omeroPort, - "connectionTimeout": self.omeroConnectionTimeout - } - - def getOmeroUploadParams(self): - """Get OMERO upload parameters as a dictionary.""" - if not self.isOmeroEnabled(): - return None - - return { - "groupId": self.omeroGroupId, - "projectId": self.omeroProjectId, - "datasetId": self.omeroDatasetId, - "uploadTimeout": self.omeroUploadTimeout - } - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
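Returning to the RecordingController changes above: `_get_detector_attrs` layers MetadataHub snapshots on top of the flat shared attributes using namespaced string keys. The condensed, standalone sketch below reproduces that key convention with plain dictionaries; the toy snapshot values are made up for illustration.

```python
import json

def merge_detector_attrs(detector_name, shared_attrs, global_snapshot, detector_snapshot):
    """Condensed illustration of the key layout used by _get_detector_attrs."""
    attrs = dict(shared_attrs)  # flat, backwards-compatible attributes come first

    if global_snapshot:
        # the whole global snapshot is stored as a single JSON string attribute
        attrs['_metadata_hub_global'] = json.dumps(global_snapshot, default=str)

    ctx = detector_snapshot.get('detector_context', {})
    if ctx:
        attrs[f'{detector_name}:pixel_size_um'] = ctx.get('pixel_size_um')
        attrs[f'{detector_name}:shape_px'] = json.dumps(ctx.get('shape_px'))
        if ctx.get('exposure_ms') is not None:
            attrs[f'{detector_name}:exposure_ms'] = ctx.get('exposure_ms')

    # remaining per-detector hub entries are namespaced under '<detector>:hub:<key>'
    for key, value_dict in detector_snapshot.get('metadata', {}).items():
        attrs[f'{detector_name}:hub:{key}'] = value_dict.get('value')

    return attrs

# Toy inputs; real values come from the shared attributes and MetadataHub at runtime.
attrs = merge_detector_attrs(
    'Camera1',
    shared_attrs={'Detector:Camera1:ExposureMs': 100.0},
    global_snapshot={'Positioner:ESP32Stage:X:PositionUm': 100.5},
    detector_snapshot={
        'detector_context': {'pixel_size_um': 6.5, 'shape_px': (2048, 2048)},
        'metadata': {'Gain': {'value': 2.5}},
    },
)
print(attrs['Camera1:hub:Gain'])  # 2.5
```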
diff --git a/imswitch/imcontrol/model/managers/InstrumentMetadataManager.py b/imswitch/imcontrol/model/managers/InstrumentMetadataManager.py new file mode 100644 index 000000000..7fd9e7448 --- /dev/null +++ b/imswitch/imcontrol/model/managers/InstrumentMetadataManager.py @@ -0,0 +1,471 @@ +""" +InstrumentMetadataManager - Manager for microscope instrument metadata. + +Provides comprehensive instrument information for OME-types metadata, +including UC2 components, firmware version, optical configuration, etc. + +This integrates with the MetadataHub to provide a complete picture of +the instrument state at acquisition time. +""" + +import json +import time +import uuid +from dataclasses import dataclass, field, asdict +from typing import Any, Dict, List, Optional +from pathlib import Path + +from imswitch.imcommon.framework import Signal, SignalInterface +from imswitch.imcommon.model import initLogger + + +@dataclass +class OpticalComponent: + """ + Represents an optical component in the microscope setup. + + Compatible with UC2 OptiKit JSON format and OME-types Instrument model. + """ + name: str + module_id: str + description: str = "" + grid_position: tuple = (0, 0, 0) + rotation: tuple = (0, 0, 0) + params: Dict[str, Any] = field(default_factory=dict) + # OME-compatible fields + manufacturer: str = "openUC2" + model: str = "" + serial_number: str = "" + + +@dataclass +class FilterInfo: + """Filter set information for OME metadata.""" + name: str + filter_type: str # "Excitation", "Emission", "Dichroic" + wavelength_nm: Optional[float] = None + bandwidth_nm: Optional[float] = None + manufacturer: str = "" + model: str = "" + + +@dataclass +class InstrumentInfo: + """ + Complete instrument metadata for OME-types integration. + + Maps to ome_types.model.Instrument structure. 
+ """ + # Microscope identification + name: str = "openUC2 Microscope" + microscope_type: str = "Inverted" # or "Upright", "Other" + manufacturer: str = "openUC2" + model: str = "UC2 Frame" + serial_number: str = "" + firmware_version: str = "" + + # Configuration UUID - unique identifier for this optical configuration + configuration_uuid: str = field(default_factory=lambda: str(uuid.uuid4())) + + # UC2 specific + uc2_frame_name: str = "" + uc2_frame_author: str = "" + uc2_frame_version: str = "1.0.0" + uc2_verified: bool = False + + # Components + components: List[OpticalComponent] = field(default_factory=list) + filters: List[FilterInfo] = field(default_factory=list) + + # Tube lens + tube_lens_focal_length_mm: float = 180.0 # Standard Nikon tube lens + tube_lens_magnification: float = 1.0 + + # Timestamps + created_at: str = "" + last_modified: str = "" + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + 'name': self.name, + 'microscope_type': self.microscope_type, + 'manufacturer': self.manufacturer, + 'model': self.model, + 'serial_number': self.serial_number, + 'firmware_version': self.firmware_version, + 'configuration_uuid': self.configuration_uuid, + 'uc2_frame_name': self.uc2_frame_name, + 'uc2_frame_author': self.uc2_frame_author, + 'uc2_frame_version': self.uc2_frame_version, + 'uc2_verified': self.uc2_verified, + 'components': [asdict(c) for c in self.components], + 'filters': [asdict(f) for f in self.filters], + 'tube_lens_focal_length_mm': self.tube_lens_focal_length_mm, + 'tube_lens_magnification': self.tube_lens_magnification, + 'created_at': self.created_at, + 'last_modified': self.last_modified, + } + + +class InstrumentMetadataManager(SignalInterface): + """ + Manager for microscope instrument metadata. + + Collects and provides instrument information from: + - UC2 OptiKit configuration files + - ESP32 firmware version + - Setup JSON configuration + - Runtime state + + This information is used to populate OME-types Instrument metadata + during image acquisition and storage. + """ + + sigInstrumentInfoUpdated = Signal(object) # InstrumentInfo + + def __init__(self, instrumentInfo=None, setupInfo=None, lowLevelManagers=None, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__logger = initLogger(self) + + self._setupInfo = setupInfo + self._lowLevelManagers = lowLevelManagers or {} + + # Initialize instrument info + self._instrument_info = InstrumentInfo( + created_at=time.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + last_modified=time.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + ) + + # Load configuration if provided + if instrumentInfo is not None: + self._load_from_config(instrumentInfo) + + # Try to get firmware version from ESP32 + self._load_firmware_version() + + self.__logger.info(f"InstrumentMetadataManager initialized: {self._instrument_info.name}") + + def _load_from_config(self, config): + """ + Load instrument info from SetupInfo.InstrumentInfo configuration. + + Handles the new config.json format with camelCase field names. 
+ """ + try: + # Map SetupInfo.InstrumentInfo fields (camelCase) to InstrumentInfo fields (snake_case) + if hasattr(config, 'name') and config.name: + self._instrument_info.name = config.name + if hasattr(config, 'microscopeType') and config.microscopeType: + self._instrument_info.microscope_type = config.microscopeType + if hasattr(config, 'manufacturer') and config.manufacturer: + self._instrument_info.manufacturer = config.manufacturer + if hasattr(config, 'model') and config.model: + self._instrument_info.model = config.model + if hasattr(config, 'serialNumber') and config.serialNumber: + self._instrument_info.serial_number = config.serialNumber + + # Optical configuration + if hasattr(config, 'tubeLensFocalLengthMm') and config.tubeLensFocalLengthMm: + self._instrument_info.tube_lens_focal_length_mm = config.tubeLensFocalLengthMm + if hasattr(config, 'tubeLensMagnification') and config.tubeLensMagnification: + self._instrument_info.tube_lens_magnification = config.tubeLensMagnification + + # UC2 specific + if hasattr(config, 'uc2FrameName') and config.uc2FrameName: + self._instrument_info.uc2_frame_name = config.uc2FrameName + if hasattr(config, 'uc2FrameAuthor') and config.uc2FrameAuthor: + self._instrument_info.uc2_frame_author = config.uc2FrameAuthor + if hasattr(config, 'uc2FrameVersion') and config.uc2FrameVersion: + self._instrument_info.uc2_frame_version = config.uc2FrameVersion + if hasattr(config, 'uc2Verified'): + self._instrument_info.uc2_verified = config.uc2Verified + + # Load filters from config + if hasattr(config, 'filters') and config.filters: + for f in config.filters: + filter_info = FilterInfo( + name=f.get('name', ''), + filter_type=f.get('filterType', 'Emission'), + wavelength_nm=f.get('wavelengthNm'), + bandwidth_nm=f.get('bandwidthNm'), + manufacturer=f.get('manufacturer', ''), + model=f.get('model', ''), + ) + self._instrument_info.filters.append(filter_info) + + # Load UC2 OptiKit config if path specified + if hasattr(config, 'uc2OptiKitConfigPath') and config.uc2OptiKitConfigPath: + self.load_uc2_optikit_config(config.uc2OptiKitConfigPath) + + self.__logger.info(f"Loaded instrument config: {self._instrument_info.name}") + + except Exception as e: + self.__logger.warning(f"Error loading instrument config: {e}") + + def _load_firmware_version(self): + """Try to load firmware version from ESP32.""" + try: + if 'rs232sManager' in self._lowLevelManagers: + rs232_manager = self._lowLevelManagers['rs232sManager'] + if 'ESP32' in rs232_manager: + esp32 = rs232_manager['ESP32']._esp32 + if hasattr(esp32, 'state') and hasattr(esp32.state, 'get_state'): + state = esp32.state.get_state() + if 'firmware_version' in state: + self._instrument_info.firmware_version = state['firmware_version'] + self.__logger.info(f"ESP32 firmware version: {self._instrument_info.firmware_version}") + except Exception as e: + self.__logger.debug(f"Could not get ESP32 firmware version: {e}") + + def load_uc2_optikit_config(self, config_path: str) -> bool: + """ + Load UC2 OptiKit configuration from JSON file. 
+ + Args: + config_path: Path to UC2 OptiKit JSON configuration file + + Returns: + True if loaded successfully + """ + try: + path = Path(config_path) + if not path.exists(): + self.__logger.warning(f"OptiKit config not found: {config_path}") + return False + + with open(path, 'r') as f: + data = json.load(f) + + return self.load_uc2_optikit_dict(data) + + except Exception as e: + self.__logger.error(f"Error loading OptiKit config: {e}") + return False + + def load_uc2_optikit_dict(self, data: Dict[str, Any]) -> bool: + """ + Load UC2 OptiKit configuration from dictionary. + + Parses the UC2 OptiKit JSON format and converts to InstrumentInfo. + + Args: + data: Dictionary from UC2 OptiKit JSON + + Returns: + True if loaded successfully + """ + try: + # Extract top-level metadata + self._instrument_info.uc2_frame_name = data.get('name', '') + self._instrument_info.uc2_frame_author = data.get('author', '') + self._instrument_info.uc2_frame_version = data.get('version', '1.0.0') + self._instrument_info.uc2_verified = data.get('uc2_verified', False) + + # Update name if frame name is provided + if self._instrument_info.uc2_frame_name: + self._instrument_info.name = f"openUC2: {self._instrument_info.uc2_frame_name}" + + # Parse metadata section + if 'metadata' in data: + meta = data['metadata'] + self._instrument_info.created_at = meta.get('created', '') + + # Parse UC2 components + self._instrument_info.components = [] + for comp_data in data.get('uc2_components', []): + component = OpticalComponent( + name=comp_data.get('name', ''), + module_id=comp_data.get('moduleId', ''), + description=comp_data.get('description', ''), + grid_position=tuple(comp_data.get('grid_pos', [0, 0, 0])), + rotation=tuple(comp_data.get('rotation', [0, 0, 0])), + params=comp_data.get('params', {}), + model=comp_data.get('originalName', ''), + ) + self._instrument_info.components.append(component) + + # Extract filter information from components + self._extract_filters_from_components() + + # Generate configuration UUID based on component hash + self._generate_configuration_uuid() + + self._instrument_info.last_modified = time.strftime("%Y-%m-%dT%H:%M:%S.000Z") + + self.sigInstrumentInfoUpdated.emit(self._instrument_info) + self.__logger.info(f"Loaded UC2 OptiKit config with {len(self._instrument_info.components)} components") + + return True + + except Exception as e: + self.__logger.error(f"Error parsing OptiKit config: {e}") + return False + + def _extract_filters_from_components(self): + """Extract filter information from UC2 components.""" + self._instrument_info.filters = [] + + for comp in self._instrument_info.components: + module_id = comp.module_id.lower() + + # Dichroic filters + if 'dichroic' in module_id or 'filter-dichroic' in module_id: + filter_info = FilterInfo( + name=comp.name, + filter_type="Dichroic", + manufacturer="openUC2", + model=comp.model, + ) + # Try to extract wavelength from description or name + self._extract_wavelength(filter_info, comp) + self._instrument_info.filters.append(filter_info) + + # Emission/Excitation filters + elif 'emifil' in module_id or 'emission' in module_id: + filter_info = FilterInfo( + name=comp.name, + filter_type="Emission", + manufacturer="openUC2", + model=comp.model, + ) + self._extract_wavelength(filter_info, comp) + self._instrument_info.filters.append(filter_info) + + elif 'excfil' in module_id or 'excitation' in module_id: + filter_info = FilterInfo( + name=comp.name, + filter_type="Excitation", + manufacturer="openUC2", + model=comp.model, + ) + 
self._extract_wavelength(filter_info, comp) + self._instrument_info.filters.append(filter_info) + + def _extract_wavelength(self, filter_info: FilterInfo, component: OpticalComponent): + """Try to extract wavelength from component name/description.""" + import re + + # Look for patterns like "532", "WLS532", "488nm", etc. + text = f"{component.name} {component.description} {component.model}" + + # Match wavelength patterns + match = re.search(r'(\d{3,4})\s*nm?', text, re.IGNORECASE) + if match: + filter_info.wavelength_nm = float(match.group(1)) + + def _generate_configuration_uuid(self): + """Generate a deterministic UUID based on component configuration.""" + import hashlib + + # Create a hash of component names and module IDs + components_str = "|".join( + f"{c.module_id}:{c.name}" + for c in sorted(self._instrument_info.components, key=lambda x: x.name) + ) + + hash_bytes = hashlib.sha256(components_str.encode()).digest()[:16] + self._instrument_info.configuration_uuid = str(uuid.UUID(bytes=hash_bytes)) + + # === Public API === + + @property + def instrument_info(self) -> InstrumentInfo: + """Get current instrument info.""" + return self._instrument_info + + def get_ome_instrument_dict(self) -> Dict[str, Any]: + """ + Get instrument information formatted for OME-types. + + Returns dictionary compatible with ome_types.model.Instrument. + """ + return { + 'id': f"Instrument:{self._instrument_info.configuration_uuid}", + 'name': self._instrument_info.name, + 'microscope': { + 'type': self._instrument_info.microscope_type, + 'manufacturer': self._instrument_info.manufacturer, + 'model': self._instrument_info.model, + 'serial_number': self._instrument_info.serial_number, + }, + # Custom annotations for UC2-specific data + 'annotation': { + 'firmware_version': self._instrument_info.firmware_version, + 'configuration_uuid': self._instrument_info.configuration_uuid, + 'uc2_frame_name': self._instrument_info.uc2_frame_name, + 'uc2_frame_author': self._instrument_info.uc2_frame_author, + 'tube_lens_focal_length_mm': self._instrument_info.tube_lens_focal_length_mm, + }, + } + + def get_filters_for_channel(self, channel_name: str) -> List[FilterInfo]: + """ + Get filters associated with a channel name. 
+ + Args: + channel_name: Name of the imaging channel + + Returns: + List of FilterInfo objects for this channel + """ + # Simple matching based on wavelength in channel name + import re + + match = re.search(r'(\d{3,4})', channel_name) + if not match: + return [] + + wavelength = float(match.group(1)) + + # Return filters with similar wavelength (within 50nm) + return [ + f for f in self._instrument_info.filters + if f.wavelength_nm and abs(f.wavelength_nm - wavelength) < 50 + ] + + def set_firmware_version(self, version: str): + """Update firmware version.""" + self._instrument_info.firmware_version = version + self._instrument_info.last_modified = time.strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.sigInstrumentInfoUpdated.emit(self._instrument_info) + + def set_tube_lens(self, focal_length_mm: float, magnification: float = 1.0): + """Set tube lens parameters.""" + self._instrument_info.tube_lens_focal_length_mm = focal_length_mm + self._instrument_info.tube_lens_magnification = magnification + self._instrument_info.last_modified = time.strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.sigInstrumentInfoUpdated.emit(self._instrument_info) + + def add_filter(self, filter_info: FilterInfo): + """Add a filter to the instrument configuration.""" + self._instrument_info.filters.append(filter_info) + self._instrument_info.last_modified = time.strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.sigInstrumentInfoUpdated.emit(self._instrument_info) + + def to_json(self) -> str: + """Serialize instrument info to JSON string.""" + return json.dumps(self._instrument_info.to_dict(), indent=2) + + def save_to_file(self, filepath: str): + """Save instrument info to JSON file.""" + with open(filepath, 'w') as f: + f.write(self.to_json()) + self.__logger.info(f"Saved instrument metadata to {filepath}") + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
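As a usage sketch for the new manager: the snippet below builds an `InstrumentMetadataManager` without any setup configuration, feeds it a minimal, made-up OptiKit-style dictionary, and reads back the OME-oriented views. It assumes ImSwitch (and its signal framework) is importable and that the module path matches the file added above; the component entries are hypothetical examples, not a real configuration.

```python
# Hedged usage sketch; the OptiKit dictionary below is a minimal made-up example.
from imswitch.imcontrol.model.managers.InstrumentMetadataManager import (
    InstrumentMetadataManager,
)

manager = InstrumentMetadataManager()  # no setup config: defaults are used

optikit = {
    "name": "Fluorescence Cube",
    "author": "openUC2",
    "version": "1.0.0",
    "uc2_components": [
        {"name": "Dichroic 488", "moduleId": "filter-dichroic", "description": "488 nm dichroic"},
        {"name": "EmiFil 520", "moduleId": "emifil", "description": "520 nm emission filter"},
    ],
}
manager.load_uc2_optikit_dict(optikit)

# OME-oriented instrument dictionary (id, name, microscope, annotation)
print(manager.get_ome_instrument_dict()["name"])  # "openUC2: Fluorescence Cube"

# Filters whose extracted wavelength lies within 50 nm of the channel's wavelength
print([f.name for f in manager.get_filters_for_channel("488 nm laser")])
```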
diff --git a/imswitch/imcontrol/model/managers/MCTManager.py b/imswitch/imcontrol/model/managers/MCTManager.py deleted file mode 100644 index 3d2f3bc0d..000000000 --- a/imswitch/imcontrol/model/managers/MCTManager.py +++ /dev/null @@ -1,45 +0,0 @@ - - -from imswitch.imcommon.framework import Signal, SignalInterface -from imswitch.imcommon.model import initLogger - - -class MCTManager(SignalInterface): - - def __init__(self, mctInfo, *args, **kwargs): - self.sigMCTMaskUpdated = Signal(object) # (maskCombined) # (maskCombined) - super().__init__(*args, **kwargs) - self.__logger = initLogger(self) - - if mctInfo is not None and mctInfo.tWait is not None: - self.tWait = mctInfo.tWait - else: - self.tWait = 0.1 - - self.update() - - - def update(self): - # self.allPatternsPaths - # self.maskDouble = self.__masks[0].concat(self.__masks[1]) - # self.maskCombined = self.maskDouble - # self.sigMCTMaskUpdated.emit(self.maskCombined) - - # returnmask = self.maskDouble - return None # returnmask.image() - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/managers/ROIScanManager.py b/imswitch/imcontrol/model/managers/ROIScanManager.py index d7f7df8eb..a642ec155 100644 --- a/imswitch/imcontrol/model/managers/ROIScanManager.py +++ b/imswitch/imcontrol/model/managers/ROIScanManager.py @@ -11,8 +11,6 @@ def __init__(self, mctInfo, *args, **kwargs): super().__init__(*args, **kwargs) self.__logger = initLogger(self) - if mctInfo is None: - return self.update() diff --git a/imswitch/imcontrol/model/managers/RecordingManager.py b/imswitch/imcontrol/model/managers/RecordingManager.py index 4bd8e4a1f..1d690c0c6 100644 --- a/imswitch/imcontrol/model/managers/RecordingManager.py +++ b/imswitch/imcontrol/model/managers/RecordingManager.py @@ -1,8 +1,10 @@ import enum import os import time +import queue +import threading from io import BytesIO -from typing import Dict, Optional, Type, List +from typing import Dict, Optional, Type, List, Callable, Any, Tuple import h5py try: import zarr @@ -30,7 +32,251 @@ IS_OME_ZARR = False +# ============================================================================= +# Background Storage Queue - Asynchronous File I/O +# ============================================================================= + +class StorageTask: + """ + A task to be executed by the background storage worker. + + Encapsulates all data needed for a file I/O operation. 
+ """ + def __init__(self, + task_type: str, + filepath: str, + data: Any = None, + attrs: Dict[str, Any] = None, + callback: Callable[[bool, str], None] = None, + priority: int = 0): + """ + Args: + task_type: Type of task ('snap', 'append', 'finalize') + filepath: Target file path + data: Image data (numpy array or dict of arrays) + attrs: Metadata attributes + callback: Optional callback(success: bool, message: str) + priority: Task priority (lower = higher priority) + """ + self.task_type = task_type + self.filepath = filepath + self.data = data + self.attrs = attrs + self.callback = callback + self.priority = priority + self.timestamp = time.time() + + def __lt__(self, other): + """For priority queue ordering.""" + return (self.priority, self.timestamp) < (other.priority, other.timestamp) + + +class BackgroundStorageWorker: + """ + Background worker that handles file I/O operations asynchronously. + + Uses a priority queue to manage storage tasks without blocking + the main acquisition thread. This ensures that image acquisition + continues smoothly while files are being written. + + Features: + - Priority-based task queue + - Non-blocking snap/append operations + - Automatic error handling with callbacks + - Graceful shutdown with queue drain + """ + + def __init__(self, max_queue_size: int = 100): + """ + Args: + max_queue_size: Maximum number of pending tasks (0 = unlimited) + """ + self._logger = initLogger(self) + self._task_queue = queue.PriorityQueue(maxsize=max_queue_size) + self._worker_thread = None + self._stop_event = threading.Event() + self._is_running = False + self._pending_tasks = 0 + self._lock = threading.Lock() + + # Statistics + self._tasks_completed = 0 + self._tasks_failed = 0 + + def start(self): + """Start the background worker thread.""" + if self._is_running: + return + + self._stop_event.clear() + self._worker_thread = threading.Thread(target=self._worker_loop, daemon=True) + self._worker_thread.start() + self._is_running = True + self._logger.info("BackgroundStorageWorker started") + + def stop(self, wait: bool = True, timeout: float = 5.0): + """ + Stop the background worker. + + Args: + wait: If True, wait for pending tasks to complete + timeout: Maximum time to wait for shutdown (seconds) + """ + if not self._is_running: + return + + self._stop_event.set() + + if wait and self._worker_thread: + self._worker_thread.join(timeout=timeout) + + self._is_running = False + self._logger.info(f"BackgroundStorageWorker stopped. Completed: {self._tasks_completed}, Failed: {self._tasks_failed}") + + def submit_task(self, task: StorageTask) -> bool: + """ + Submit a storage task to the queue. 
+ + Args: + task: StorageTask to execute + + Returns: + True if task was queued, False if queue is full + """ + if not self._is_running: + self._logger.warning("Cannot submit task: worker not running") + return False + + try: + self._task_queue.put_nowait(task) + with self._lock: + self._pending_tasks += 1 + return True + except queue.Full: + self._logger.warning("Storage queue is full, task dropped") + if task.callback: + task.callback(False, "Queue full") + return False + + def get_queue_size(self) -> int: + """Get current number of pending tasks.""" + with self._lock: + return self._pending_tasks + + def _worker_loop(self): + """Main worker loop - processes tasks from queue.""" + while not self._stop_event.is_set(): + try: + # Get task with timeout to allow checking stop event + task = self._task_queue.get(timeout=0.1) + + try: + self._execute_task(task) + with self._lock: + self._pending_tasks -= 1 + self._tasks_completed += 1 + except Exception as e: + self._logger.error(f"Task execution failed: {e}") + with self._lock: + self._pending_tasks -= 1 + self._tasks_failed += 1 + if task.callback: + task.callback(False, str(e)) + finally: + self._task_queue.task_done() + + except queue.Empty: + continue + + # Drain remaining tasks on shutdown + while not self._task_queue.empty(): + try: + task = self._task_queue.get_nowait() + self._execute_task(task) + self._task_queue.task_done() + except: + break + + def _execute_task(self, task: StorageTask): + """Execute a single storage task.""" + success = True + message = "OK" + + try: + if task.task_type == 'snap_tiff': + self._snap_tiff(task.filepath, task.data, task.attrs) + elif task.task_type == 'snap_png': + self._snap_png(task.filepath, task.data) + elif task.task_type == 'snap_jpg': + self._snap_jpg(task.filepath, task.data) + elif task.task_type == 'append_tiff': + self._append_tiff(task.filepath, task.data) + else: + message = f"Unknown task type: {task.task_type}" + success = False + + except Exception as e: + success = False + message = str(e) + raise + finally: + if task.callback: + task.callback(success, message) + + def _snap_tiff(self, filepath: str, data: np.ndarray, attrs: Dict[str, Any] = None): + """Write TIFF file with optional OME metadata.""" + if attrs: + tiff.imwrite(filepath, data, metadata=attrs, imagej=False) + else: + tiff.imwrite(filepath, data) + self._logger.debug(f"Saved TIFF: {filepath}") + + def _snap_png(self, filepath: str, data: np.ndarray): + """Write PNG file.""" + if data.dtype == np.float32 or data.dtype == np.float64: + data = cv2.convertScaleAbs(data) + if data.ndim == 2: + data = cv2.cvtColor(data, cv2.COLOR_GRAY2RGB) + cv2.imwrite(filepath, data) + self._logger.debug(f"Saved PNG: {filepath}") + + def _snap_jpg(self, filepath: str, data: np.ndarray): + """Write JPEG file.""" + if data.ndim == 2: + data = cv2.cvtColor(data, cv2.COLOR_GRAY2RGB) + cv2.imwrite(filepath, data) + self._logger.debug(f"Saved JPG: {filepath}") + + def _append_tiff(self, filepath: str, data: np.ndarray): + """Append to existing TIFF file.""" + tiff.imwrite(filepath, data, append=True) # TODO: Add metadata + self._logger.debug(f"Appended to TIFF: {filepath}") + + + +# Global background storage worker instance +_background_storage_worker: Optional[BackgroundStorageWorker] = None + + +def get_background_storage_worker() -> BackgroundStorageWorker: + """Get or create the global background storage worker.""" + global _background_storage_worker + if _background_storage_worker is None: + _background_storage_worker = 
BackgroundStorageWorker() + _background_storage_worker.start() + return _background_storage_worker + + +def shutdown_background_storage(): + """Shutdown the global background storage worker.""" + global _background_storage_worker + if _background_storage_worker is not None: + _background_storage_worker.stop(wait=True) + _background_storage_worker = None + + def _create_zarr_store(path): + # TODO: REmove """ Create a Zarr store compatible with both Zarr 2.x and 3.x @@ -49,9 +295,25 @@ def _create_zarr_store(path): else: # Zarr 3.x with direct path usage return path + + +# NOTE: AsTemporaryFile is deprecated and should not be used for new code. +# Direct file writing is preferred with proper error handling. +# This class is kept for backwards compatibility but will be removed in a future version. class AsTemporayFile(object): - """ A temporary file that when exiting the context manager is renamed to its original name. """ + # TODO: REmove + """ + DEPRECATED: A temporary file that when exiting the context manager is renamed to its original name. + + This pattern is no longer recommended. Use direct file writing with try/except instead. + """ def __init__(self, filepath, tmp_extension='.tmp'): + import warnings + warnings.warn( + "AsTemporayFile is deprecated. Use direct file writing with error handling.", + DeprecationWarning, + stacklevel=2 + ) if os.path.exists(filepath): raise FileExistsError(f'File {filepath} already exists.') self.path = filepath @@ -80,12 +342,15 @@ def stream(self, data = None, **kwargs): class ZarrStorer(Storer): + # TODO: REmove """ A storer that stores the images in a zarr file store """ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): if not IS_OME_ZARR: logger.error("OME Zarr is not installed. Please install ome-zarr.") return - with AsTemporayFile(f'{self.filepath}.zarr') as path: + + path = f'{self.filepath}.zarr' + try: datasets: List[dict] = [] store = _create_zarr_store(path) root = zarr.group(store=store) @@ -93,56 +358,256 @@ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): for channel, image in images.items(): shape = self.detectorManager[channel].shape root.create_dataset(channel, data=image, shape=tuple(reversed(shape)), - chunks=(512, 512), dtype='i2') #TODO: why not dynamic chunking? 
+ chunks=(512, 512), dtype='i2') datasets.append({"path": channel, "transformation": None}) - write_multiscales_metadata(root, datasets, format_from_version("0.2"), shape, **attrs) + + # Write metadata + metadata_kwargs = attrs if attrs else {} + write_multiscales_metadata(root, datasets, format_from_version("0.2"), shape, **metadata_kwargs) logger.info(f"Saved image to zarr file {path}") + except Exception as e: + logger.error(f"Error saving zarr file {path}: {e}") -class HDF5Storer(Storer): - """ A storer that stores the images in a series of hd5 files """ - def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): - for channel, image in images.items(): - with AsTemporayFile(f'{self.filepath}_{channel}.h5') as path: - file = h5py.File(path, 'w') - shape = self.detectorManager[channel].shape - dataset = file.create_dataset('data', tuple(reversed(shape)), dtype='i2') - for key, value in attrs[channel].items(): - try: - dataset.attrs[key] = value - except: - logger.debug(f'Could not put key:value pair {key}:{value} in hdf5 metadata.') - - dataset.attrs['detector_name'] = channel - - # For ImageJ compatibility - dataset.attrs['element_size_um'] = \ - self.detectorManager[channel].pixelSizeUm - - if image.ndim == 3: - dataset[:, ...] = np.moveaxis(image, [0, 1, 2], [2, 1, 0]) - elif image.ndim == 4: - dataset[:, ...] = np.moveaxis(image, [0, 1, 2, 3], [3, 2, 1, 0]) - else: - dataset[:, ...] = np.moveaxis(image, 0, -1) - - file.close() - logger.info(f"Saved image to hdf5 file {path}") class TiffStorer(Storer): - """ A storer that stores the images in a series of tiff files """ + """ A storer that stores the images in a series of tiff files with OME metadata """ + def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): for channel, image in images.items(): - with AsTemporayFile(f'{self.filepath}_{channel}.tiff') as path: - if hasattr(image, "shape"): - tiff.imwrite(path, image,) # TODO: Parse metadata to tiff meta data - logger.info(f"Saved image to tiff file {path}") + path = f'{self.filepath}_{channel}.ome.tiff' + if not hasattr(image, "shape"): + logger.error(f"Could not save image to tiff file {path}") + continue + + try: + # Build OME-TIFF metadata from attrs + ome_metadata = self._build_ome_metadata(channel, image, attrs) + + if ome_metadata: + # Save as OME-TIFF with metadata + tiff.imwrite( + path, + image, + metadata=ome_metadata, + imagej=False, # Use OME metadata, not ImageJ + ) + else: + # Fallback to basic TIFF + tiff.imwrite(path, image) + + logger.info(f"Saved image to tiff file {path}") + except Exception as e: + logger.error(f"Error saving tiff file {path}: {e}") + # Fallback to basic save + tiff.imwrite(path, image) + + def _build_ome_metadata(self, detector_name: str, image: np.ndarray, attrs: Dict[str, str]) -> Optional[Dict]: + """ + Build OME-TIFF metadata dictionary from shared attributes. + + IMPORTANT: The 'detector_name' parameter refers to the camera/detector used. + The actual imaging channel is defined by the active illumination (laser/LED). + We extract this from the shared attributes. 
+ + OME Channel Concept: + - In OME-TIFF, a "channel" represents a specific imaging condition + - This is typically defined by the excitation wavelength (laser/LED) + - The detector (camera) is separate from the channel + + Args: + detector_name: Name of the detector/camera (e.g., "WidefieldCamera") + image: Image array + attrs: Shared attributes dictionary (may contain SharedAttrValue objects) + + Returns: + Dictionary with OME metadata or None if attrs is empty + """ + if not attrs: + return None + + metadata = {} + + def _get_value(val): + """Extract value from SharedAttrValue or return raw value.""" + return val.value if hasattr(val, 'value') else val + + def _search_attr(patterns: list, search_dict: dict): + """Search for attribute using multiple key patterns.""" + for pattern in patterns: + # Direct match + if pattern in search_dict: + return _get_value(search_dict[pattern]) + # Substring match + for key in search_dict.keys(): + if pattern in str(key): + return _get_value(search_dict[key]) + return None + + try: + # === Detector/Camera Information === + metadata['Detector'] = detector_name + + # Extract pixel size + pixel_size = _search_attr([ + f'Detector:{detector_name}:PixelSizeUm', + 'Detector:PixelSizeUm', + 'PixelSizeUm' + ], attrs) + + if pixel_size: + metadata['PhysicalSizeX'] = float(pixel_size) + metadata['PhysicalSizeY'] = float(pixel_size) + metadata['PhysicalSizeXUnit'] = 'µm' + metadata['PhysicalSizeYUnit'] = 'µm' + + # Extract exposure + exposure = _search_attr([ + f'Detector:{detector_name}:ExposureMs', + 'Detector:ExposureMs', + 'ExposureMs' + ], attrs) + + if exposure: + metadata['ExposureTime'] = float(exposure) / 1000.0 # Convert to seconds + metadata['ExposureTimeUnit'] = 's' + + # === Stage Position Information === + # Search for positioner positions using various key patterns + for axis in ['X', 'Y', 'Z']: + position = None + # Try different key patterns + for key, val in attrs.items(): + key_str = str(key) + # Match patterns like 'Positioner:ESP32Stage:X:Position' or 'Positioner:Stage:X:Position' + if 'Positioner:' in key_str and f':{axis}:Position' in key_str: + position = _get_value(val) + break + + if position is not None: + metadata[f'Position{axis}'] = float(position) + metadata[f'Position{axis}Unit'] = 'µm' + + # === Illumination / Channel Information === + # The imaging channel is defined by active illumination sources + active_lasers = [] + active_leds = [] + + # Find all laser sources and check if they are active + laser_sources = {} # {laser_name: {wavelength, value, enabled}} + for key, val in attrs.items(): + key_str = str(key) + if key_str.startswith('Laser:'): + parts = key_str.split(':') + if len(parts) >= 3: + laser_name = parts[1] + attr_name = parts[2] + + if laser_name not in laser_sources: + laser_sources[laser_name] = {} + + laser_sources[laser_name][attr_name] = _get_value(val) + + # Determine which lasers are active (Enabled=True AND Value>0) + for laser_name, laser_data in laser_sources.items(): + is_enabled = laser_data.get('Enabled', False) + value = laser_data.get('Value', 0) + wavelength = laser_data.get('WavelengthNm', 0) + + # Consider laser active if enabled AND has non-zero power + if is_enabled and value and float(value) > 0: + active_lasers.append({ + 'Name': laser_name, + 'WavelengthNm': float(wavelength) if wavelength else None, + 'Power': float(value), + 'IsActive': True, + }) + + # Similarly for LEDs + led_sources = {} + for key, val in attrs.items(): + key_str = str(key) + if key_str.startswith('LED:'): + parts = 
key_str.split(':') + if len(parts) >= 3: + led_name = parts[1] + attr_name = parts[2] + + if led_name not in led_sources: + led_sources[led_name] = {} + + led_sources[led_name][attr_name] = _get_value(val) + + for led_name, led_data in led_sources.items(): + is_enabled = led_data.get('Enabled', False) + value = led_data.get('Value', 0) + + if is_enabled and value and float(value) > 0: + active_leds.append({ + 'Name': led_name, + 'Value': float(value), + 'IsActive': True, + }) + + # Store all illumination info + if active_lasers: + metadata['ActiveLasers'] = active_lasers + # Set primary channel based on first active laser's wavelength + if active_lasers[0].get('WavelengthNm'): + metadata['ExcitationWavelength'] = active_lasers[0]['WavelengthNm'] + metadata['ExcitationWavelengthUnit'] = 'nm' + # Build a descriptive channel name + metadata['Channel'] = f"{int(active_lasers[0]['WavelengthNm'])}nm" else: - logger.error(f"Could not save image to tiff file {path}") + metadata['Channel'] = active_lasers[0]['Name'] + elif active_leds: + metadata['ActiveLEDs'] = active_leds + metadata['Channel'] = active_leds[0]['Name'] + else: + # No active illumination - use detector name as fallback + metadata['Channel'] = f"Brightfield_{detector_name}" + + # Store all laser sources (including inactive) for reference + all_lasers = [] + for laser_name, laser_data in laser_sources.items(): + all_lasers.append({ + 'Name': laser_name, + 'WavelengthNm': float(laser_data.get('WavelengthNm', 0)) if laser_data.get('WavelengthNm') else None, + 'Power': float(laser_data.get('Value', 0)) if laser_data.get('Value') else 0, + 'Enabled': laser_data.get('Enabled', False), + }) + if all_lasers: + metadata['Lasers'] = all_lasers + + # === Objective Information === + objective_name = _search_attr(['Objective:Name', 'ObjectiveName'], attrs) + if objective_name: + metadata['Objective'] = str(objective_name) + + magnification = _search_attr(['Objective:Magnification', 'ObjectiveMagnification'], attrs) + if magnification: + metadata['Magnification'] = float(magnification) + + na = _search_attr(['Objective:NA', 'ObjectiveNA'], attrs) + if na: + metadata['NumericalAperture'] = float(na) + + # === Timestamp === + import datetime + metadata['DateTime'] = datetime.datetime.now().isoformat() + + return metadata if metadata else None + + except Exception as e: + logger.warning(f"Error building OME metadata: {e}") + import traceback + logger.debug(traceback.format_exc()) + return None class PNGStorer(Storer): + # TODO: Is redudant with the BackgroundStorageWorker implementation? If so, merge! """ A storer that stores the images in a series of png files """ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): for channel, image in images.items(): @@ -159,6 +624,7 @@ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): class JPGStorer(Storer): + # TODO: Is redudant with the BackgroundStorageWorker implementation? If so, merge! 
""" A storer that stores the images in a series of jpg files """ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): for channel, image in images.items(): @@ -178,6 +644,7 @@ def snap(self, images: Dict[str, np.ndarray], attrs: Dict[str, str] = None): class SaveMode(enum.Enum): + # TODO: Move up/ make modular Disk = 1 RAM = 2 DiskAndRAM = 3 @@ -185,8 +652,8 @@ class SaveMode(enum.Enum): class SaveFormat(enum.Enum): + # TODO: Move up/ make modular TIFF = 1 - HDF5 = 2 ZARR = 3 MP4 = 4 PNG = 5 @@ -194,8 +661,8 @@ class SaveFormat(enum.Enum): DEFAULT_STORER_MAP: Dict[str, Type[Storer]] = { - SaveFormat.ZARR: ZarrStorer, - SaveFormat.HDF5: HDF5Storer, + # TODO: Move up/ make modular + SaveFormat.ZARR: ZarrStorer, # TODO: REmove SaveFormat.TIFF: TiffStorer, SaveFormat.MP4: MP4Storer, SaveFormat.PNG: PNGStorer, @@ -207,6 +674,7 @@ class RecordingManager(SignalInterface): """ RecordingManager handles single frame captures as well as continuous recordings of detector data. """ + # TODO: This needs a full rework - I'm not sure if the signals are still needed and used in the IS_HEADLESS Mode anywhere anymore, probalby we should remove it entireely sigRecordingStarted = Signal() sigRecordingEnded = Signal() sigRecordingFrameNumUpdated = Signal(int) # (frameNumber) @@ -237,6 +705,8 @@ def __init__(self, detectorsManager, storerMap: Optional[Dict[str, Type[Storer]] def __del__(self): self.endRecording(emitSignal=False, wait=True) + # Wait for any pending background I/O to complete + self.wait_for_io_complete(timeout=10.0) if hasattr(super(), '__del__'): super().__del__() @@ -250,14 +720,14 @@ def detectorsManager(self): return self.__detectorsManager def startRecording(self, detectorNames, recMode, savename, saveMode, attrs, - saveFormat=SaveFormat.HDF5, singleMultiDetectorFile=False, singleLapseFile=False, + saveFormat=SaveFormat.TIFF, singleMultiDetectorFile=False, singleLapseFile=False, recFrames=None, recTime=None): """ Starts a recording with the specified detectors, recording mode, file name prefix and attributes to save to the recording per detector. In SpecFrames mode, recFrames (the number of frames) must be specified, and in SpecTime mode, recTime (the recording time in seconds) must be specified. """ - + # TODO: This is not used in most cases other than recording MP4, so I guess it would be wise to entirely remove this part and create a new way for saving videos by copying the existing implementation into a new place; Also we want to merge it with the self.__logger.info('Starting recording') self.__record = True self.__recordingWorker.detectorNames = detectorNames @@ -293,10 +763,23 @@ def endRecording(self, emitSignal=True, wait=True): if wait: self._thread.wait() - def snap(self, detectorNames=None, savename="", saveMode=SaveMode.Disk, saveFormat=SaveFormat.TIFF, attrs=None): + def snap(self, detectorNames=None, savename="", saveMode=SaveMode.Disk, saveFormat=SaveFormat.TIFF, attrs=None, + use_background_io: bool = True, io_callback: Callable[[bool, str], None] = None): """ Saves an image with the specified detectors to a file with the specified name prefix, save mode, file format and attributes - to save to the capture per detector. """ + to save to the capture per detector. + + Args: + detectorNames: List of detector names to capture. If None, all detectors. + savename: File path prefix for saving. + saveMode: SaveMode.Disk, SaveMode.RAM, SaveMode.DiskAndRAM, or SaveMode.Numpy + saveFormat: SaveFormat.TIFF, SaveFormat.PNG, SaveFormat.JPG, etc. 
+ attrs: Dictionary of metadata attributes to save. + use_background_io: If True (default), use background queue for non-blocking I/O. + Set to False for synchronous writes (blocks until complete). + io_callback: Optional callback(success: bool, message: str) called when + background I/O completes. Only used when use_background_io=True. + """ acqHandle = self.__detectorsManager.startAcquisition() if detectorNames is None: @@ -311,12 +794,15 @@ def snap(self, detectorNames=None, savename="", saveMode=SaveMode.Disk, saveForm image = images[detectorName] if saveFormat: - storer = self.__storerMap[saveFormat] - if saveMode == SaveMode.Disk or saveMode == SaveMode.DiskAndRAM: - # Save images to disk - store = storer(savename, self.__detectorsManager) - store.snap(images, attrs) + if use_background_io: + # Use background queue for non-blocking I/O + self._snap_background(images, savename, saveFormat, attrs, io_callback) + else: + # Synchronous write (original behavior) + storer = self.__storerMap[saveFormat] + store = storer(savename, self.__detectorsManager) + store.snap(images, attrs) if saveMode == SaveMode.RAM or saveMode == SaveMode.DiskAndRAM: for channel, image in images.items(): @@ -331,6 +817,88 @@ def snap(self, detectorNames=None, savename="", saveMode=SaveMode.Disk, saveForm if saveMode == SaveMode.Numpy: return images + def _snap_background(self, images: Dict[str, np.ndarray], savename: str, + saveFormat: SaveFormat, attrs: Dict[str, str] = None, + callback: Callable[[bool, str], None] = None): + """ + Queue images for background saving. + + This method is non-blocking - it submits tasks to the background + storage worker and returns immediately. + """ + worker = get_background_storage_worker() + + for channel, image in images.items(): + # Build OME metadata for each image + ome_attrs = self._build_ome_metadata(channel, image, attrs) + + # Determine task type and filepath based on format + if saveFormat == SaveFormat.TIFF: + task_type = 'snap_tiff' + filepath = f'{savename}_{channel}.tiff' + task_attrs = ome_attrs + elif saveFormat == SaveFormat.PNG: + task_type = 'snap_png' + filepath = f'{savename}_{channel}.png' + task_attrs = None # PNG doesn't support metadata + elif saveFormat == SaveFormat.JPG: + task_type = 'snap_jpg' + filepath = f'{savename}_{channel}.jpg' + task_attrs = None # JPG doesn't support metadata + else: + # Unsupported format for background I/O, fall back to sync + self.__logger.warning(f"Format {saveFormat} not supported for background I/O, using sync write") + storer = self.__storerMap[saveFormat] + store = storer(savename, self.__detectorsManager) + store.snap({channel: image}, attrs) + continue + + # Make a copy of the image data for thread safety + image_copy = image.copy() + + task = StorageTask( + task_type=task_type, + filepath=filepath, + data=image_copy, + attrs=task_attrs, + callback=callback, + priority=0 # Normal priority + ) + + if not worker.submit_task(task): + self.__logger.warning(f"Failed to queue storage task for {filepath}") + + def get_pending_io_count(self) -> int: + """ + Get the number of pending background I/O operations. + + Useful for checking if all writes have completed before + ending an experiment or closing the application. + """ + try: + worker = get_background_storage_worker() + return worker.get_queue_size() + except: + return 0 + + def wait_for_io_complete(self, timeout: float = 30.0) -> bool: + """ + Wait for all pending background I/O operations to complete. + + Args: + timeout: Maximum time to wait in seconds. 
+ + Returns: + True if all I/O completed, False if timeout reached. + """ + start = time.time() + while self.get_pending_io_count() > 0: + if time.time() - start > timeout: + self.__logger.warning(f"Timeout waiting for I/O completion, {self.get_pending_io_count()} tasks pending") + return False + time.sleep(0.1) + return True + def snapImagePrev(self, detectorName, savename, saveFormat, image, attrs): """ Saves a previously taken image to a file with the specified name prefix, @@ -338,28 +906,7 @@ def snapImagePrev(self, detectorName, savename, saveFormat, image, attrs): fileExtension = str(saveFormat.name).lower() filePath = self.getSaveFilePath(f'{savename}_{detectorName}.{fileExtension}') - # Write file - if saveFormat == SaveFormat.HDF5: - file = h5py.File(filePath, 'w') - - shape = image.shape - dataset = file.create_dataset('data', tuple(reversed(shape)), dtype='i2') - - for key, value in attrs[detectorName].items(): - try: - dataset.attrs[key] = value - except: - self.__logger.debug(f'Could not put key:value pair {key}:{value} in hdf5 metadata.') - - dataset.attrs['detector_name'] = detectorName - - # For ImageJ compatibility - dataset.attrs['element_size_um'] = \ - self.__detectorsManager[detectorName].pixelSizeUm - - dataset[:, ...] = np.moveaxis(image, 0, -1) - file.close() - elif saveFormat == SaveFormat.TIFF: + if saveFormat == SaveFormat.TIFF: tiff.imwrite(filePath, image) elif saveFormat == SaveFormat.PNG: cv2.imwrite(filePath, image) @@ -417,7 +964,7 @@ def run(self): self.__recordingManager.detectorsManager.stopAcquisition(acqHandle) def _record(self): - if self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + if self.saveFormat == SaveFormat.ZARR: files, fileDests, filePaths = self._getFiles() shapes = {detectorName: self.__recordingManager.detectorsManager[detectorName].shape @@ -446,26 +993,7 @@ def _record(self): if len(shape) > 2: shape = shape[-2:] - if self.saveFormat == SaveFormat.HDF5: - # Initial number of frames must not be 0; otherwise, too much disk space may get - # allocated. We remove this default frame later on if no frames are captured. 
- datasets[detectorName] = files[detectorName].create_dataset( - datasetName, (1, *reversed(shape)), - maxshape=(None, *reversed(shape)), - dtype='i2' - ) - - for key, value in self.attrs[detectorName].items(): - datasets[detectorName].attrs[key] = value - - datasets[detectorName].attrs['detector_name'] = detectorName - - # For ImageJ compatibility - datasets[detectorName].attrs['element_size_um'] \ - = self.__recordingManager.detectorsManager[detectorName].pixelSizeUm - datasets[detectorName].attrs['writing'] = True - - elif self.saveFormat == SaveFormat.TIFF: + if self.saveFormat == SaveFormat.TIFF: fileExtension = str(self.saveFormat.name).lower() filenames[detectorName] = self.__recordingManager.getSaveFilePath( f'{self.savename}_{detectorName}.{fileExtension}', False, False) @@ -518,16 +1046,6 @@ def _record(self): filePath = self.__recordingManager.getSaveFilePath( f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5: - dataset = datasets[detectorName] - if (it + n) <= recFrames: - dataset.resize(n + it, axis=0) - dataset[it:it + n, :, :] = newFrames - currentFrame[detectorName] += n - else: - dataset.resize(recFrames, axis=0) - dataset[it:recFrames, :, :] = newFrames[0:recFrames - it] - currentFrame[detectorName] = recFrames elif self.saveFormat == SaveFormat.ZARR: dataset = datasets[detectorName] if it == 0: @@ -570,7 +1088,7 @@ def _record(self): filePath = self.__recordingManager.getSaveFilePath( f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + elif self.saveFormat == SaveFormat.ZARR: it = currentFrame[detectorName] dataset = datasets[detectorName] dataset.resize(n + it, axis=0) @@ -608,12 +1126,6 @@ def _record(self): f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5: - it = currentFrame[detectorName] - dataset = datasets[detectorName] - dataset.resize(n + it, axis=0) - dataset[it:it + n, :, :] = newFrames - elif self.saveFormat == SaveFormat.ZARR: it = currentFrame[detectorName] dataset = datasets[detectorName] @@ -637,14 +1149,9 @@ def _record(self): raise ValueError('Unsupported recording mode specified') finally: - if self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + if self.saveFormat == SaveFormat.ZARR: for detectorName, file in files.items(): # Remove default frame if no frames have been captured - if currentFrame[detectorName] < 1: - if self.saveFormat == SaveFormat.HDF5: - datasets[detectorName].resize(0, axis=0) - - # Handle memory recordings if self.saveMode == SaveMode.RAM or self.saveMode == SaveMode.DiskAndRAM: filePath = filePaths[detectorName] name = os.path.basename(filePath) @@ -660,10 +1167,7 @@ def _record(self): ) else: datasets[detectorName].attrs['writing'] = False - if self.saveFormat == SaveFormat.HDF5: - file.close() - else: - self.store.close() + self.store.close() emitSignal = True if self.recMode in [RecMode.SpecFrames, RecMode.ScanOnce, RecMode.ScanLapse]: emitSignal = False @@ -676,7 +1180,7 @@ def _getFiles(self): files = {} fileDests = {} filePaths = {} - extension = 'hdf5' if self.saveFormat == SaveFormat.HDF5 else 'zarr' + extension = 'zarr' for detectorName in self.detectorNames: if singleMultiDetectorFile: @@ -703,10 +1207,7 @@ def _getFiles(self): if singleMultiDetectorFile and len(files) > 0: files[detectorName] = list(files.values())[0] else: - if self.saveFormat == 
SaveFormat.HDF5: - files[detectorName] = h5py.File(fileDests[detectorName], - 'a' if singleLapseFile else 'w-') - elif self.saveFormat == SaveFormat.ZARR: + if self.saveFormat == SaveFormat.ZARR: self.store = _create_zarr_store(fileDests[detectorName]) files[detectorName] = zarr.group(store=self.store, overwrite=True) @@ -738,7 +1239,7 @@ def moveToThread(self, thread) -> None: def _record(self): self.__logger.info('Recording started in mode: ' + str(self.recMode)) - if self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + if self.saveFormat == SaveFormat.ZARR: files, fileDests, filePaths = self._getFiles() shapes = {detectorName: self.__recordingManager.detectorsManager[detectorName].shape @@ -767,37 +1268,7 @@ def _record(self): if len(shape) > 2: shape = shape[-2:] - if self.saveFormat == SaveFormat.HDF5: - # Initial number of frames must not be 0; otherwise, too much disk space may get - # allocated. We remove this default frame later on if no frames are captured. - datasets[detectorName] = files[detectorName].create_dataset( - datasetName, (1, *reversed(shape)), - maxshape=(None, *reversed(shape)), - dtype='i2' - ) - - for key, value in self.attrs[detectorName].items(): - self.__logger.debug(key) - self.__logger.debug(value) - try: - datasets[detectorName].attrs[key] = value - except: - pass - - datasets[detectorName].attrs['detector_name'] = detectorName - - # For ImageJ compatibility - datasets[detectorName].attrs['element_size_um'] \ - = self.__recordingManager.detectorsManager[detectorName].pixelSizeUm - datasets[detectorName].attrs['writing'] = True - - for key, value in self.attrs[detectorName].items(): - try: - datasets[detectorName].attrs[key] = value - except: - pass - - elif self.saveFormat == SaveFormat.MP4: + if self.saveFormat == SaveFormat.MP4: # Need to initiliaze videowriter for each detector self.__logger.debug("Initialize MP4 recorder") fourcc = cv2.VideoWriter_fourcc(*'mp4v') @@ -873,16 +1344,7 @@ def _record(self): filePath = self.__recordingManager.getSaveFilePath( f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5: - dataset = datasets[detectorName] - if (it + n) <= recFrames: - dataset.resize(n + it, axis=0) - dataset[it:it + n, :, :] = newFrames - currentFrame[detectorName] += n - else: - dataset.resize(recFrames, axis=0) - dataset[it:recFrames, :, :] = newFrames[0:recFrames - it] - currentFrame[detectorName] = recFrames + elif self.saveFormat == SaveFormat.ZARR: dataset = datasets[detectorName] if it == 0: @@ -934,7 +1396,7 @@ def _record(self): filePath = self.__recordingManager.getSaveFilePath( f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + elif self.saveFormat == SaveFormat.ZARR: it = currentFrame[detectorName] dataset = datasets[detectorName] dataset.resize(n + it, axis=0) @@ -981,12 +1443,6 @@ def _record(self): f'{self.savename}_{detectorName}.{fileExtension}', False, False) continue - elif self.saveFormat == SaveFormat.HDF5: - it = currentFrame[detectorName] - dataset = datasets[detectorName] - dataset.resize(n + it, axis=0) - dataset[it:it + n, :, :] = newFrames - elif self.saveFormat == SaveFormat.ZARR: it = currentFrame[detectorName] dataset = datasets[detectorName] @@ -1018,12 +1474,9 @@ def _record(self): raise ValueError('Unsupported recording mode specified') finally: - if self.saveFormat == SaveFormat.HDF5 or self.saveFormat == SaveFormat.ZARR: + if 
self.saveFormat == SaveFormat.ZARR: for detectorName, file in files.items(): # Remove default frame if no frames have been captured - if currentFrame[detectorName] < 1: - if self.saveFormat == SaveFormat.HDF5: - datasets[detectorName].resize(0, axis=0) # Handle memory recordings if self.saveMode == SaveMode.RAM or self.saveMode == SaveMode.DiskAndRAM: @@ -1041,9 +1494,7 @@ def _record(self): ) else: datasets[detectorName].attrs['writing'] = False - if self.saveFormat == SaveFormat.HDF5: - file.close() - elif self.saveFormat == SaveFormat.MP4: + if self.saveFormat == SaveFormat.MP4: for detectorName, file in files.items(): datasets[detectorName].release() else: @@ -1060,7 +1511,7 @@ def _getFiles(self): files = {} fileDests = {} filePaths = {} - extension = 'hdf5' if self.saveFormat == SaveFormat.HDF5 else 'zarr' + extension = 'zarr' for detectorName in self.detectorNames: if singleMultiDetectorFile: @@ -1087,10 +1538,7 @@ def _getFiles(self): if singleMultiDetectorFile and len(files) > 0: files[detectorName] = list(files.values())[0] else: - if self.saveFormat == SaveFormat.HDF5: - files[detectorName] = h5py.File(fileDests[detectorName], - 'a' if singleLapseFile else 'w-') - elif self.saveFormat == SaveFormat.ZARR: + if self.saveFormat == SaveFormat.ZARR: self.store = _create_zarr_store(fileDests[detectorName]) files[detectorName] = zarr.group(store=self.store, overwrite=True) diff --git a/imswitch/imcontrol/model/managers/ScanManagerMoNaLISA.py b/imswitch/imcontrol/model/managers/ScanManagerMoNaLISA.py deleted file mode 100644 index 81b296ab2..000000000 --- a/imswitch/imcontrol/model/managers/ScanManagerMoNaLISA.py +++ /dev/null @@ -1,57 +0,0 @@ -from .ScanManagerBase import SuperScanManager - - -class ScanManagerMoNaLISA(SuperScanManager): - """ ScanManager helps with generating signals for scanning. """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @property - def TTLTimeUnits(self): - self._checkScanDefined() - return self._TTLCycleDesigner.timeUnits - - def makeFullScan(self, scanParameters, TTLParameters, staticPositioner=False): - """ Generates stage and TTL scan signals. """ - self._checkScanDefined() - - if not staticPositioner: - scanSignalsDict, positions, scanInfoDict = self.getScanSignalsDict(scanParameters) - if not self._scanDesigner.checkSignalComp( - scanParameters, self._setupInfo, scanInfoDict - ): - self._logger.error( - 'Signal voltages outside scanner ranges: try scanning a smaller ROI or a slower' - ' scan.' - ) - return - - TTLCycleSignalsDict = self.getTTLCycleSignalsDict(TTLParameters, scanInfoDict) - else: - TTLCycleSignalsDict = self.getTTLCycleSignalsDict(TTLParameters) - scanSignalsDict = {} - scanInfoDict = {} - - return ( - {'scanSignalsDict': scanSignalsDict, - 'TTLCycleSignalsDict': TTLCycleSignalsDict}, - scanInfoDict - ) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
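The hunks above add an optional non-blocking path to `RecordingManager.snap()`: frames are copied, wrapped in `StorageTask` objects and handed to the background storage worker, and `get_pending_io_count()` / `wait_for_io_complete()` let callers drain the queue before shutdown. A minimal usage sketch under assumptions not shown in this diff: the `recordingManager` instance comes from the running master controller, and the import path follows the usual ImSwitch managers layout.

```python
# Assumed import path (standard ImSwitch layout); `recordingManager` is the
# instance owned by the master controller and is not created here.
from imswitch.imcontrol.model.managers.RecordingManager import SaveMode, SaveFormat

def on_snap_written(success: bool, message: str) -> None:
    # Invoked by the background storage worker once the write has finished.
    print(f"snap written: success={success}, message={message}")

# Non-blocking snap: frames are queued and written by the background worker;
# '_<detector>.<extension>' is appended to the savename prefix per detector.
recordingManager.snap(
    detectorNames=None,                  # None = capture from all detectors
    savename='/data/session_001/snap',   # hypothetical output prefix
    saveMode=SaveMode.Disk,
    saveFormat=SaveFormat.TIFF,
    attrs=None,
    use_background_io=True,
    io_callback=on_snap_written,
)

# Drain the queue before tearing the application down.
print(f"pending I/O tasks: {recordingManager.get_pending_io_count()}")
if not recordingManager.wait_for_io_complete(timeout=30.0):
    print("warning: background writes did not finish within the timeout")
```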
diff --git a/imswitch/imcontrol/model/managers/StandManager.py b/imswitch/imcontrol/model/managers/StandManager.py deleted file mode 100644 index 2dc72b30f..000000000 --- a/imswitch/imcontrol/model/managers/StandManager.py +++ /dev/null @@ -1,45 +0,0 @@ -from imswitch.imcommon.model import initLogger -from imswitch.imcommon.model import pythontools -from abc import ABC -import importlib - - -class StandManager(ABC): - """ StandManager interface for dealing with microscope stand managers. """ - def __init__(self, deviceInfo, **lowLevelManagers): - self.__logger = initLogger(self) - self._subManager = None - currentPackage = '.'.join(__name__.split('.')[:-1]) - if deviceInfo: - # Create sub-manager - try: - package = importlib.import_module( - pythontools.joinModulePath(f'{currentPackage}.{"stands"}',deviceInfo.managerName)) - manager = getattr(package, deviceInfo.managerName) - self._subManager = manager(deviceInfo, **lowLevelManagers) - except: - self.__logger.error('Failed to load LeicaDMIManager (not provided due to NDA). Loading mocker.') - package = importlib.import_module( - pythontools.joinModulePath(f'{currentPackage}.{"stands"}',f'{deviceInfo.managerName}_mock')) - manager = getattr(package, f'Mock{deviceInfo.managerName}') - self._subManager = manager(deviceInfo, **lowLevelManagers) - - def motCorrPos(self, position): - self._subManager.motCorrPos(position) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
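Returning to the RecordingManager changes above: with HDF5 removed, ZARR is the remaining stack-style recording format (TIFF, MP4, PNG and JPG are written frame by frame), and the default `saveFormat` for `startRecording()` is now `SaveFormat.TIFF`. A sketch of a frame-limited recording, assuming `RecMode` is importable from the same module (it is referenced by the worker but defined outside this diff) and that `attrs` is a per-detector dictionary as the worker expects.

```python
# Assumed imports: RecMode is referenced by the worker above but defined
# outside this diff; the module path follows the standard ImSwitch layout.
from imswitch.imcontrol.model.managers.RecordingManager import (
    RecMode, SaveMode, SaveFormat,
)

detectorNames = ['Camera1']                   # hypothetical detector name
attrs = {name: {} for name in detectorNames}  # per-detector metadata dicts

recordingManager.startRecording(
    detectorNames=detectorNames,
    recMode=RecMode.SpecFrames,        # stop after recFrames frames
    savename='/data/session_001/rec',  # hypothetical output prefix
    saveMode=SaveMode.Disk,
    attrs=attrs,
    saveFormat=SaveFormat.ZARR,        # stacked output; the default is now TIFF
    recFrames=100,
)

# endRecording() is safe to call for an early stop; with wait=True it blocks
# until the recording thread has finished.
recordingManager.endRecording(emitSignal=True, wait=True)
```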
diff --git a/imswitch/imcontrol/model/managers/__init__.py b/imswitch/imcontrol/model/managers/__init__.py index d91de9467..2512cb484 100644 --- a/imswitch/imcontrol/model/managers/__init__.py +++ b/imswitch/imcontrol/model/managers/__init__.py @@ -14,13 +14,10 @@ from .SLMManager import SLMManager from .ScanManagerPointScan import ScanManagerPointScan from .ScanManagerBase import ScanManagerBase -from .ScanManagerMoNaLISA import ScanManagerMoNaLISA -from .StandManager import StandManager from .RotatorsManager import RotatorsManager from .UC2ConfigManager import UC2ConfigManager from .SIMManager import SIMManager from .DPCManager import DPCManager -from .MCTManager import MCTManager from .TimelapseManager import TimelapseManager from .ExperimentManager import ExperimentManager from .ROIScanManager import ROIScanManager @@ -36,3 +33,4 @@ from .FlatfieldManager import FlatfieldManager from .PixelCalibrationManager import PixelCalibrationManager from .ArkitektManager import ArkitektManager +from .InstrumentMetadataManager import InstrumentMetadataManager diff --git a/imswitch/imcontrol/model/managers/stands/LeicaDMIManager_mock.py b/imswitch/imcontrol/model/managers/stands/LeicaDMIManager_mock.py deleted file mode 100644 index 18aee22d3..000000000 --- a/imswitch/imcontrol/model/managers/stands/LeicaDMIManager_mock.py +++ /dev/null @@ -1,54 +0,0 @@ -from imswitch.imcommon.model import initLogger - - -class MockLeicaDMIManager: - def __init__(self, deviceInfo, *args, **kwargs): - self.__logger = initLogger(self) - try: - self._rs232Manager = kwargs['rs232sManager']._subManagers[deviceInfo.rs232device] - except: - self.__logger.error(f'Failed to access Leica DMI stand RS232 connection with name {deviceInfo.rs232device}, define it in your setup .json. Loading mocker.') - from imswitch.imcontrol.model.interfaces.RS232Driver_mock import MockRS232Driver - self._rs232Manager = MockRS232Driver(name=deviceInfo.rs232device, settings={'port': 'Mock'}) - - def move(self, value, *args): - if not int(value) == 0: - cmd = str(int(value)) - self._rs232Manager.write(cmd) - - self._position = self._position + value - return self._position - - def setPosition(self, value, *args): - cmd = str(int(value)) - self._rs232Manager.write(cmd) - - self._position = value - return self._position - - def returnMod(self, reply): - return reply - - def position(self, *args): - cmd = '000' - return self.returnMod(self._rs232Manager.send(cmd)) - - def motCorrPos(self, value): - """ Absolute mot_corr position movement. """ - movetopos = int(round(value)) - cmd = str(movetopos) - self._rs232Manager.write(cmd) - - # the serial command automatically sleeps until a reply is gotten, which it gets after flip is finished - def setFLUO(self, *args): - cmd = '000' - self._rs232Manager.query(cmd) - - # the serial command automatically sleeps until a reply is gotten, which it gets after flip is finished - def setCS(self, *args): - cmd = '000' - self._rs232Manager.query(cmd) - - def setILshutter(self, value): - cmd = str(value) - self._rs232Manager.query(cmd) diff --git a/imswitch/imcontrol/model/managers/stands/__init__.py b/imswitch/imcontrol/model/managers/stands/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/imswitch/imcontrol/model/metadata/__init__.py b/imswitch/imcontrol/model/metadata/__init__.py new file mode 100644 index 000000000..6c5497b13 --- /dev/null +++ b/imswitch/imcontrol/model/metadata/__init__.py @@ -0,0 +1,23 @@ +""" +Metadata Hub and OME-types integration for ImSwitch. 
+ +This package provides: +- MetadataHub: Central aggregator for hardware state and detector metadata +- DetectorContext: Detector-specific metadata (pixel size, shape, transforms) +- Schema: Standardized metadata keys and normalization +- SharedAttrs Bridge: Connects legacy SharedAttributes to MetadataHub +""" + +from .metadata_hub import MetadataHub, DetectorContext, FrameEvent +from .schema import MetadataSchema, MetadataCategory, SharedAttrValue +from .sharedattrs_bridge import SharedAttrsMetadataBridge + +__all__ = [ + 'MetadataHub', + 'DetectorContext', + 'FrameEvent', + 'MetadataSchema', + 'MetadataCategory', + 'SharedAttrValue', + 'SharedAttrsMetadataBridge', +] diff --git a/imswitch/imcontrol/model/metadata/metadata_hub.py b/imswitch/imcontrol/model/metadata/metadata_hub.py new file mode 100644 index 000000000..3c2f18f11 --- /dev/null +++ b/imswitch/imcontrol/model/metadata/metadata_hub.py @@ -0,0 +1,720 @@ +""" +Central Metadata Hub for ImSwitch with OME-types integration. + +Aggregates hardware state and detector-specific metadata, +providing a clean interface for recording and OME writers. +""" + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Tuple +from collections import deque, defaultdict +import time +import threading +import numpy as np + +try: + from ome_types import OME + from ome_types.model import Image, Pixels, Channel, Plane + from ome_types.model import Instrument, Objective as OMEObjective + from ome_types.model.simple_types import UnitsLength, UnitsTime + from ome_types.model import PixelsType + HAS_OME_TYPES = True +except ImportError: + HAS_OME_TYPES = False + # Set all to None for consistency + OME = None + Image = None + Pixels = None + Channel = None + Plane = None + Instrument = None + OMEObjective = None + UnitsLength = None + UnitsTime = None + PixelsType = None + +from .schema import SharedAttrValue, MetadataSchema + + +@dataclass +class DetectorContext: + """ + Detector-specific metadata context. + + Tracks all metadata needed to generate OME-compliant detector/image metadata. + Similar to Micro-Manager's ImageMetadata but adapted for ImSwitch. 
+ """ + name: str + + # Required physical properties + shape_px: Tuple[int, int] # (width, height) in pixels + pixel_size_um: float # Physical pixel size in micrometers + dtype: str = 'uint16' # Numpy dtype string + + # Optional derived/explicit properties + fov_um: Optional[Tuple[float, float]] = None # Field of view (width, height) in um + binning: int = 1 + roi: Optional[Tuple[int, int, int, int]] = None # (x, y, w, h) + + # Channel info + channel_name: Optional[str] = None + channel_color: Optional[str] = None # Hex color like "00FF00" + wavelength_nm: Optional[float] = None + + # Camera settings + exposure_ms: Optional[float] = None + gain: Optional[float] = None + temperature_c: Optional[float] = None + bit_depth: Optional[int] = None + + # Frame info from camera hardware + frame_number: Optional[int] = None # Hardware frame number + frame_timestamp: Optional[float] = None # Hardware frame timestamp + is_rgb: bool = False # Whether detector outputs RGB images + + # Transforms and calibration + affine_transform: Optional[np.ndarray] = None # 3x3 affine matrix + objective_name: Optional[str] = None + objective_magnification: Optional[float] = None + objective_na: Optional[float] = None + calibration_hash: Optional[str] = None + + # Metadata + last_update: float = field(default_factory=time.time) + + def __post_init__(self): + """Calculate derived properties.""" + if self.fov_um is None: + # Calculate FOV from shape and pixel size + self.fov_um = ( + self.shape_px[0] * self.pixel_size_um, + self.shape_px[1] * self.pixel_size_um + ) + + if self.channel_name is None: + self.channel_name = self.name + + def update(self, **kwargs): + """Update context fields.""" + for key, value in kwargs.items(): + if hasattr(self, key): + setattr(self, key, value) + self.last_update = time.time() + + def to_dict(self) -> Dict[str, Any]: + """Export as dictionary.""" + return { + 'name': self.name, + 'shape_px': self.shape_px, + 'pixel_size_um': self.pixel_size_um, + 'fov_um': self.fov_um, + 'dtype': self.dtype, + 'binning': self.binning, + 'roi': self.roi, + 'channel_name': self.channel_name, + 'channel_color': self.channel_color, + 'wavelength_nm': self.wavelength_nm, + 'exposure_ms': self.exposure_ms, + 'gain': self.gain, + 'temperature_c': self.temperature_c, + 'bit_depth': self.bit_depth, + 'frame_number': self.frame_number, + 'frame_timestamp': self.frame_timestamp, + 'is_rgb': self.is_rgb, + 'objective_name': self.objective_name, + 'objective_magnification': self.objective_magnification, + 'objective_na': self.objective_na, + 'last_update': self.last_update, + } + + def to_ome_pixels(self, size_z: int = 1, size_t: int = 1, size_c: int = 1) -> 'Pixels': + """ + Generate OME Pixels object for this detector. 
+ + Args: + size_z: Number of Z planes + size_t: Number of time points + size_c: Number of channels + + Returns: + OME Pixels object + """ + if not HAS_OME_TYPES: + raise ImportError("ome-types is required for OME metadata generation") + + # Map numpy dtype to OME PixelsType + dtype_map = { + 'uint8': PixelsType.UINT8, + 'uint16': PixelsType.UINT16, + 'uint32': PixelsType.UINT32, + 'int8': PixelsType.INT8, + 'int16': PixelsType.INT16, + 'int32': PixelsType.INT32, + 'float32': PixelsType.FLOAT, + 'float64': PixelsType.DOUBLE, + } + pixel_type = dtype_map.get(self.dtype, PixelsType.UINT16) + + # Create Pixels with physical dimensions + pixels = Pixels( + id=f"Pixels:{self.name}", + dimension_order="XYZCT", # Standard order + type=pixel_type, + size_x=self.shape_px[0], + size_y=self.shape_px[1], + size_z=size_z, + size_c=size_c, + size_t=size_t, + physical_size_x=self.pixel_size_um, + physical_size_x_unit=UnitsLength.MICROMETER, + physical_size_y=self.pixel_size_um, + physical_size_y_unit=UnitsLength.MICROMETER, + ) + + # Add channels + for c in range(size_c): + channel = Channel( + id=f"Channel:{self.name}:{c}", + name=self.channel_name if size_c == 1 else f"{self.channel_name}_{c}", + samples_per_pixel=1, + ) + if self.channel_color: + # Parse hex color to RGB with validation + try: + # Ensure it's a valid 6-character hex string + color_str = str(self.channel_color).lstrip('#') + if len(color_str) == 6 and all(c in '0123456789ABCDEFabcdef' for c in color_str): + color_int = int(color_str, 16) + channel.color = color_int + else: + # Invalid color format, skip silently + pass + except (ValueError, TypeError) as e: + # Log warning but don't fail + import logging + logging.getLogger(__name__).warning(f"Invalid channel color format: {self.channel_color}, error: {e}") + pass + if self.wavelength_nm: + channel.emission_wavelength = self.wavelength_nm + channel.emission_wavelength_unit = UnitsLength.NANOMETER + pixels.channels.append(channel) + + return pixels + + +@dataclass +class FrameEvent: + """ + Per-frame metadata event. + + Captures metadata at the time of frame acquisition, + ensuring alignment with actual image data. + """ + frame_number: int + timestamp: float = field(default_factory=time.time) + detector_name: Optional[str] = None + + # Positional metadata + stage_x_um: Optional[float] = None + stage_y_um: Optional[float] = None + stage_z_um: Optional[float] = None + + # Acquisition settings at trigger time + exposure_ms: Optional[float] = None + laser_power_mw: Optional[float] = None + + # Additional metadata + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + """Export as dictionary.""" + return { + 'frame_number': self.frame_number, + 'timestamp': self.timestamp, + 'detector_name': self.detector_name, + 'stage_x_um': self.stage_x_um, + 'stage_y_um': self.stage_y_um, + 'stage_z_um': self.stage_z_um, + 'exposure_ms': self.exposure_ms, + 'laser_power_mw': self.laser_power_mw, + 'metadata': self.metadata, + } + + def to_ome_plane(self, the_z: int = 0, the_c: int = 0, the_t: int = 0) -> 'Plane': + """ + Generate OME Plane object for this frame. 
+ + Args: + the_z: Z index + the_c: Channel index + the_t: Time index + + Returns: + OME Plane object + """ + if not HAS_OME_TYPES: + raise ImportError("ome-types is required for OME metadata generation") + + plane = Plane( + the_z=the_z, + the_c=the_c, + the_t=the_t, + ) + + # Add positional metadata + if self.stage_x_um is not None: + plane.position_x = self.stage_x_um + plane.position_x_unit = UnitsLength.MICROMETER + if self.stage_y_um is not None: + plane.position_y = self.stage_y_um + plane.position_y_unit = UnitsLength.MICROMETER + if self.stage_z_um is not None: + plane.position_z = self.stage_z_um + plane.position_z_unit = UnitsLength.MICROMETER + + # Add timing + if self.timestamp: + plane.delta_t = self.timestamp + plane.delta_t_unit = UnitsTime.SECOND + + if self.exposure_ms is not None: + plane.exposure_time = self.exposure_ms / 1000.0 # Convert to seconds + plane.exposure_time_unit = UnitsTime.SECOND + + return plane + + +class MetadataHub: + """ + Central metadata aggregator for ImSwitch. + + Provides: + - Global metadata storage (hardware state) + - Per-detector metadata contexts + - Per-frame event queues for acquisition alignment + - OME-types generation for writers + + Thread-safe for concurrent access from multiple controllers. + """ + + def __init__(self): + self._lock = threading.RLock() + + # Global metadata store: key -> SharedAttrValue + self._global_metadata: Dict[Tuple[str, ...], SharedAttrValue] = {} + + # Detector contexts: detector_name -> DetectorContext + self._detector_contexts: Dict[str, DetectorContext] = {} + + # Per-frame event queues: detector_name -> deque[FrameEvent] + self._frame_events: Dict[str, deque] = defaultdict(lambda: deque(maxlen=10000)) + + # Frame counters: detector_name -> int + self._frame_counters: Dict[str, int] = defaultdict(int) + + def update(self, key: Tuple[str, ...], value: Any, + ts: Optional[float] = None, + units: Optional[str] = None, + source: Optional[str] = None): + """ + Update global metadata. + + Args: + key: Metadata key tuple + value: Metadata value + ts: Optional timestamp (defaults to now) + units: Optional units string + source: Optional source identifier + """ + with self._lock: + # Normalize using schema + attr_value = MetadataSchema.normalize_value(key, value, timestamp=ts, source=source) + if units: + attr_value.units = units + self._global_metadata[key] = attr_value + + def get(self, key: Tuple[str, ...]) -> Optional[SharedAttrValue]: + """Get a metadata value.""" + with self._lock: + return self._global_metadata.get(key) + + def get_latest(self, flat: bool = False, + filter_category: Optional[str] = None) -> Dict: + """ + Get latest global metadata. 
+ + Args: + flat: If True, return flat dict with ':' separated keys + filter_category: Optional category to filter by + + Returns: + Dictionary of metadata + """ + with self._lock: + result = {} + for key, attr_value in self._global_metadata.items(): + # Apply filter if specified + if filter_category and key[0] != filter_category: + continue + + if flat: + # Flatten key to string + key_str = ':'.join(key) + result[key_str] = { + 'value': attr_value.value, + 'timestamp': attr_value.timestamp, + 'units': attr_value.units, + 'source': attr_value.source, + } + else: + # Nested dict + current = result + for i, segment in enumerate(key[:-1]): + if segment not in current: + current[segment] = {} + current = current[segment] + current[key[-1]] = { + 'value': attr_value.value, + 'timestamp': attr_value.timestamp, + 'units': attr_value.units, + 'source': attr_value.source, + } + return result + + def register_detector(self, detector_name: str, context: DetectorContext): + """Register a detector context.""" + with self._lock: + self._detector_contexts[detector_name] = context + + def get_detector(self, detector_name: str) -> Optional[DetectorContext]: + """Get a detector context.""" + with self._lock: + return self._detector_contexts.get(detector_name) + + def update_detector(self, detector_name: str, **kwargs): + """Update detector context fields.""" + with self._lock: + if detector_name in self._detector_contexts: + self._detector_contexts[detector_name].update(**kwargs) + + def snapshot_global(self) -> Dict[str, Any]: + """Get a snapshot of all global metadata.""" + return self.get_latest(flat=False) + + def snapshot_detector(self, detector_name: str) -> Dict[str, Any]: + """ + Get a snapshot of detector-specific metadata. + + Returns: + Dictionary with detector context and relevant global metadata + """ + with self._lock: + result = {} + + # Add detector context + if detector_name in self._detector_contexts: + result['detector_context'] = self._detector_contexts[detector_name].to_dict() + + # Add relevant global metadata for this detector + detector_metadata = {} + for key, attr_value in self._global_metadata.items(): + # Check if this metadata is for this detector + if len(key) >= 2 and key[1] == detector_name: + detector_metadata[':'.join(key)] = { + 'value': attr_value.value, + 'timestamp': attr_value.timestamp, + 'units': attr_value.units, + } + if detector_metadata: + result['metadata'] = detector_metadata + + return result + + def push_frame_event(self, detector_name: str, event: Optional[FrameEvent] = None, **kwargs): + """ + Push a frame event for a detector. + + Args: + detector_name: Detector name + event: Optional pre-constructed FrameEvent + **kwargs: If event is None, construct FrameEvent from kwargs + """ + with self._lock: + if event is None: + # Auto-increment frame counter + frame_number = self._frame_counters[detector_name] + self._frame_counters[detector_name] += 1 + + event = FrameEvent( + frame_number=frame_number, + detector_name=detector_name, + **kwargs + ) + + self._frame_events[detector_name].append(event) + + def pop_frame_events(self, detector_name: str, n: int) -> List[FrameEvent]: + """ + Pop n frame events for a detector. 
+ + Args: + detector_name: Detector name + n: Number of events to pop + + Returns: + List of FrameEvent objects (may be fewer than n if queue is short) + """ + with self._lock: + events = [] + queue = self._frame_events[detector_name] + for _ in range(min(n, len(queue))): + if queue: + events.append(queue.popleft()) + return events + + def peek_frame_events(self, detector_name: str, n: int = None) -> List[FrameEvent]: + """ + Peek at frame events without removing them. + + Args: + detector_name: Detector name + n: Number of events to peek (None = all) + + Returns: + List of FrameEvent objects + """ + with self._lock: + queue = self._frame_events[detector_name] + if n is None: + return list(queue) + else: + return list(queue)[:n] + + def clear_frame_events(self, detector_name: str): + """Clear all frame events for a detector.""" + with self._lock: + self._frame_events[detector_name].clear() + self._frame_counters[detector_name] = 0 + + def create_pre_trigger_snapshot(self, detector_name: str) -> Dict[str, Any]: + """ + Create a pre-trigger snapshot of the current hardware state. + + This method should be called BEFORE triggering image acquisition + to capture the hardware state at the moment of trigger, avoiding + race conditions where state changes between trigger and frame receipt. + + Following the pattern from octopi-research (CaptureInfo set before trigger). + + Args: + detector_name: Detector name + + Returns: + Dictionary with current hardware state (positions, illumination, etc.) + """ + with self._lock: + snapshot = { + 'timestamp': time.time(), + 'detector_name': detector_name, + 'global_metadata': {}, + 'detector_context': None, + } + + # Capture global metadata (positioners, illumination, objective) + for key, attr_value in self._global_metadata.items(): + key_str = ':'.join(key) + snapshot['global_metadata'][key_str] = { + 'value': attr_value.value, + 'timestamp': attr_value.timestamp, + 'units': attr_value.units, + } + + # Capture detector context + if detector_name in self._detector_contexts: + snapshot['detector_context'] = self._detector_contexts[detector_name].to_dict() + + return snapshot + + def create_frame_event_from_snapshot(self, snapshot: Dict[str, Any], + frame_number: int = None, + hw_frame_number: int = None) -> FrameEvent: + """ + Create a FrameEvent from a pre-trigger snapshot. + + This method should be called when a frame is received, using the + snapshot that was captured before the trigger. This ensures metadata + alignment with actual image data. 
+ + Args: + snapshot: Pre-trigger snapshot from create_pre_trigger_snapshot + frame_number: Optional override for frame number + hw_frame_number: Hardware frame number from camera + + Returns: + FrameEvent with aligned metadata + """ + with self._lock: + detector_name = snapshot.get('detector_name') + + if frame_number is None: + frame_number = self._frame_counters[detector_name] + self._frame_counters[detector_name] += 1 + + # Extract position from global metadata + global_meta = snapshot.get('global_metadata', {}) + + # Find stage positions (look for Positioner:*:*:Position keys) + stage_x = None + stage_y = None + stage_z = None + + for key, value_dict in global_meta.items(): + parts = key.split(':') + if len(parts) >= 4 and parts[0] == 'Positioner' and parts[3] == 'Position': + axis = parts[2] + pos = value_dict.get('value') + if axis == 'X': + stage_x = pos + elif axis == 'Y': + stage_y = pos + elif axis == 'Z': + stage_z = pos + + # Get exposure from detector context + ctx = snapshot.get('detector_context', {}) + exposure_ms = ctx.get('exposure_ms') + + # Create event + event = FrameEvent( + frame_number=frame_number, + timestamp=snapshot.get('timestamp', time.time()), + detector_name=detector_name, + stage_x_um=stage_x, + stage_y_um=stage_y, + stage_z_um=stage_z, + exposure_ms=exposure_ms, + metadata={ + 'hw_frame_number': hw_frame_number, + 'pre_trigger_snapshot': True, + } + ) + + return event + + def get_frame_events(self, detector_name: str, limit: int = None) -> List[FrameEvent]: + """ + Get frame events for a detector without removing them. + + Args: + detector_name: Detector name + limit: Maximum number of events to return (None = all) + + Returns: + List of FrameEvent objects + """ + return self.peek_frame_events(detector_name, n=limit) + + def export_detector_contexts(self) -> Dict[str, Dict[str, Any]]: + """ + Export all detector contexts as dictionaries. + + Returns: + Dictionary mapping detector names to their context dicts + """ + with self._lock: + return { + name: ctx.to_dict() + for name, ctx in self._detector_contexts.items() + } + + def to_json(self) -> str: + """ + Export complete metadata state as JSON string. + + Returns: + JSON string with global metadata and detector contexts + """ + import json + import numpy as np + + def json_serializer(obj): + """Custom JSON serializer for special types.""" + if isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, (np.integer, np.floating)): + return obj.item() + elif hasattr(obj, '__dict__'): + return str(obj) + else: + return str(obj) + + with self._lock: + data = { + 'timestamp': time.time(), + 'global_metadata': self.get_latest(flat=True), + 'detector_contexts': self.export_detector_contexts(), + 'frame_counts': dict(self._frame_counters), + } + return json.dumps(data, default=json_serializer, indent=2) + + def to_ome(self, detector_names: Optional[List[str]] = None) -> Optional['OME']: + """ + Generate OME metadata object for registered detectors. 
+ + Args: + detector_names: List of detector names (None = all) + + Returns: + OME object or None if ome-types not available + """ + if not HAS_OME_TYPES: + return None + + with self._lock: + if detector_names is None: + detector_names = list(self._detector_contexts.keys()) + + ome = OME() + + # Create an image for each detector + for det_name in detector_names: + if det_name not in self._detector_contexts: + continue + + context = self._detector_contexts[det_name] + + # Create image + image = Image( + id=f"Image:{det_name}", + name=det_name, + ) + + # Add pixels + image.pixels = context.to_ome_pixels() + + # Add planes from frame events if available + events = self.peek_frame_events(det_name) + for event in events: + plane = event.to_ome_plane(the_t=event.frame_number) + image.pixels.planes.append(plane) + + ome.images.append(image) + + return ome + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/metadata/schema.py b/imswitch/imcontrol/model/metadata/schema.py new file mode 100644 index 000000000..8377a7932 --- /dev/null +++ b/imswitch/imcontrol/model/metadata/schema.py @@ -0,0 +1,259 @@ +""" +Metadata schema and standardization for ImSwitch. + +Provides standardized metadata keys, units, and value normalization +to ensure consistency across controllers and managers. +""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Optional, Dict, Tuple +import time + + +class MetadataCategory(str, Enum): + """Standard metadata categories following Micro-Manager conventions.""" + POSITIONER = "Positioner" + ILLUMINATION = "Illumination" + OBJECTIVE = "Objective" + DETECTOR = "Detector" + ENVIRONMENT = "Environment" + SYSTEM = "System" + RECORDING = "Recording" + + +@dataclass +class SharedAttrValue: + """ + Typed, timestamped metadata value wrapper. + + Similar to Micro-Manager's PropertyValue but adapted for ImSwitch. + """ + value: Any + timestamp: float = field(default_factory=time.time) + units: Optional[str] = None + dtype: Optional[str] = None + source: Optional[str] = None # Controller/Manager name + valid: bool = True + + def __repr__(self): + return f"SharedAttrValue({self.value} {self.units or ''} @ {self.timestamp:.3f})" + + +class MetadataSchema: + """ + Schema registry for standardized metadata keys and units. + + Provides validation and normalization of metadata keys/values + following OME-types and Micro-Manager conventions. 
+ """ + + # Standard field definitions: (units, dtype, description) + POSITIONER_FIELDS = { + 'Position': ('um', 'float', 'Position in micrometers'), + 'PositionUm': ('um', 'float', 'Position in micrometers (alias)'), + 'Speed': ('um/s', 'float', 'Speed in micrometers per second'), + 'SpeedUmS': ('um/s', 'float', 'Speed in micrometers per second (alias)'), + 'IsHomed': ('', 'bool', 'Whether axis is homed'), + 'Home': ('', 'bool', 'Homing status'), + 'Stop': ('', 'bool', 'Stop status'), + 'IsMoving': ('', 'bool', 'Whether axis is moving'), + 'SetpointUm': ('um', 'float', 'Target position in micrometers'), + 'AccelerationUmS2': ('um/s^2', 'float', 'Acceleration'), + } + + ILLUMINATION_FIELDS = { + 'Enabled': ('', 'bool', 'Whether illumination is enabled'), + 'WavelengthNm': ('nm', 'float', 'Wavelength in nanometers'), + 'PowerMw': ('mW', 'float', 'Power in milliwatts'), + 'CurrentMa': ('mA', 'float', 'Current in milliamps'), + 'Mode': ('', 'str', 'Operating mode'), + 'IntensityPercent': ('%', 'float', 'Intensity as percentage'), + 'Value': ('', 'float', 'Current value (units depend on device)'), + 'ModulationEnabled': ('', 'bool', 'Whether modulation is enabled'), + 'Frequency': ('Hz', 'float', 'Modulation frequency'), + 'DutyCycle': ('%', 'float', 'Modulation duty cycle'), + } + + OBJECTIVE_FIELDS = { + 'Name': ('', 'str', 'Objective name'), + 'Magnification': ('', 'float', 'Magnification factor'), + 'NA': ('', 'float', 'Numerical aperture'), + 'Immersion': ('', 'str', 'Immersion medium'), + 'TurretIndex': ('', 'int', 'Turret position'), + 'WorkingDistanceUm': ('um', 'float', 'Working distance'), + } + + DETECTOR_FIELDS = { + 'ExposureMs': ('ms', 'float', 'Exposure time in milliseconds'), + 'Gain': ('', 'float', 'Detector gain'), + 'Binning': ('', 'int', 'Binning factor'), + 'ROI': ('', 'tuple', 'Region of interest (x, y, w, h)'), + 'TemperatureC': ('C', 'float', 'Detector temperature in Celsius'), + 'PixelSizeUm': ('um', 'float', 'Physical pixel size in micrometers'), + 'ShapePx': ('px', 'tuple', 'Detector shape in pixels (width, height)'), + 'BitDepth': ('', 'int', 'Bit depth'), + 'ReadoutMode': ('', 'str', 'Readout mode'), + 'IsRGB': ('', 'bool', 'Whether detector outputs RGB images'), + 'FrameNumber': ('', 'int', 'Frame number from camera hardware'), + 'FrameTimestamp': ('s', 'float', 'Frame timestamp from camera'), + 'FOVUm': ('um', 'tuple', 'Field of view in micrometers (width, height)'), + } + + ENVIRONMENT_FIELDS = { + 'TemperatureC': ('C', 'float', 'Temperature in Celsius'), + 'HumidityPercent': ('%', 'float', 'Relative humidity'), + 'CO2Percent': ('%', 'float', 'CO2 concentration'), + 'PressurePa': ('Pa', 'float', 'Pressure in Pascals'), + } + + SYSTEM_FIELDS = { + 'Timestamp': ('s', 'float', 'Unix timestamp'), + 'FrameNumber': ('', 'int', 'Frame number in sequence'), + 'ElapsedTimeS': ('s', 'float', 'Elapsed time in seconds'), + } + + # Map categories to their field definitions + CATEGORY_FIELDS = { + MetadataCategory.POSITIONER: POSITIONER_FIELDS, + MetadataCategory.ILLUMINATION: ILLUMINATION_FIELDS, + MetadataCategory.OBJECTIVE: OBJECTIVE_FIELDS, + MetadataCategory.DETECTOR: DETECTOR_FIELDS, + MetadataCategory.ENVIRONMENT: ENVIRONMENT_FIELDS, + MetadataCategory.SYSTEM: SYSTEM_FIELDS, + } + + @classmethod + def validate_key(cls, key: Tuple[str, ...]) -> bool: + """ + Validate a metadata key tuple. + + Args: + key: Tuple like ('Positioner', 'Stage', 'X', 'PositionUm') + Format: (category, device, axis_or_sub, field) + Minimum 2 elements for compatibility, but 4 is recommended. 
+ + Returns: + True if key is valid + """ + if not isinstance(key, tuple) or len(key) < 2: + return False + + # Check if category is recognized + category = key[0] + try: + MetadataCategory(category) + except ValueError: + return False + + return True + + @classmethod + def get_field_info(cls, category: str, field: str) -> Optional[Tuple[str, str, str]]: + """ + Get field information (units, dtype, description). + + Args: + category: Metadata category + field: Field name + + Returns: + (units, dtype, description) or None if not found + """ + try: + cat = MetadataCategory(category) + fields = cls.CATEGORY_FIELDS.get(cat, {}) + return fields.get(field) + except (ValueError, KeyError): + return None + + @classmethod + def normalize_value(cls, key: Tuple[str, ...], value: Any, + timestamp: Optional[float] = None, + source: Optional[str] = None) -> SharedAttrValue: + """ + Normalize a metadata value to a SharedAttrValue. + + Args: + key: Metadata key tuple + value: Raw value + timestamp: Optional timestamp (defaults to now) + source: Optional source identifier + + Returns: + SharedAttrValue with units and type information + """ + if timestamp is None: + timestamp = time.time() + + # If already a SharedAttrValue, return it + if isinstance(value, SharedAttrValue): + return value + + # Extract category and field from key + if len(key) >= 4: + category, device, axis_or_sub, field = key[0], key[1], key[2], key[3] + elif len(key) >= 2: + category, field = key[0], key[-1] + device, axis_or_sub = None, None + else: + # Invalid key, return raw value wrapped + return SharedAttrValue(value=value, timestamp=timestamp, source=source) + + # Get field info from schema + field_info = cls.get_field_info(category, field) + if field_info: + units, dtype, description = field_info + return SharedAttrValue( + value=value, + timestamp=timestamp, + units=units if units else None, + dtype=dtype, + source=source, + valid=True + ) + else: + # Unknown field, but still wrap it + return SharedAttrValue( + value=value, + timestamp=timestamp, + source=source, + valid=True + ) + + @classmethod + def make_key(cls, category: MetadataCategory, device: str, + axis_or_sub: Optional[str], field: str) -> Tuple[str, ...]: + """ + Construct a standardized metadata key. + + Args: + category: Metadata category + device: Device name + axis_or_sub: Axis name or sub-component (can be None) + field: Field name + + Returns: + Tuple key + """ + if axis_or_sub: + return (category.value, device, axis_or_sub, field) + else: + return (category.value, device, field) + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
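Beyond the key helpers, `schema.py` backs the pre-trigger snapshot flow that `metadata_hub.py` introduces above for aligning per-frame metadata with the moment of trigger. A minimal sketch of that flow; the detector and axis names are placeholders, and the stage key deliberately uses the `Position` field because that is what `create_frame_event_from_snapshot()` matches on when extracting stage coordinates.

```python
from imswitch.imcontrol.model.metadata import (
    MetadataHub, MetadataSchema, MetadataCategory, DetectorContext,
)

hub = MetadataHub()

# Field info lookup from the schema registry: (units, dtype, description).
print(MetadataSchema.get_field_info('Detector', 'ExposureMs'))  # ('ms', 'float', ...)

# Register a (hypothetical) detector so snapshots carry its context.
hub.register_detector('cam0', DetectorContext(
    name='cam0', shape_px=(512, 512), pixel_size_um=0.325,
    dtype='uint16', exposure_ms=50.0,
))

# Publish a stage position; the 'Position' field is what the snapshot-to-event
# conversion looks for when filling stage_x/y/z.
key = MetadataSchema.make_key(MetadataCategory.POSITIONER, 'Stage', 'X', 'Position')
hub.update(key, 123.4, source='example')

# 1) Capture hardware state *before* triggering the camera...
snapshot = hub.create_pre_trigger_snapshot('cam0')
# 2) ...trigger and grab the frame...
# 3) ...then convert the snapshot into a FrameEvent once the frame arrives.
event = hub.create_frame_event_from_snapshot(snapshot, hw_frame_number=7)
hub.push_frame_event('cam0', event)

print(event.stage_x_um, event.exposure_ms)  # 123.4 50.0
```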
diff --git a/imswitch/imcontrol/model/metadata/sharedattrs_bridge.py b/imswitch/imcontrol/model/metadata/sharedattrs_bridge.py new file mode 100644 index 000000000..39b1eb8a5 --- /dev/null +++ b/imswitch/imcontrol/model/metadata/sharedattrs_bridge.py @@ -0,0 +1,102 @@ +""" +Bridge between legacy SharedAttributes and MetadataHub. + +Subscribes to SharedAttributes signal updates and forwards them to the +MetadataHub with validation and normalization. +""" + +from typing import Optional, List +import logging + +from imswitch.imcommon.model import SharedAttributes +from .metadata_hub import MetadataHub +from .schema import MetadataSchema, MetadataCategory + + +logger = logging.getLogger(__name__) + + +class SharedAttrsMetadataBridge: + """ + Bridge to connect SharedAttributes to MetadataHub. + + Listens to sharedAttrs.sigAttributeSet signal and pushes + validated/normalized updates to the hub. + + This provides backwards compatibility while enabling the new + metadata infrastructure. + """ + + def __init__(self, + shared_attrs: SharedAttributes, + hub: MetadataHub, + categories: Optional[List[str]] = None): + """ + Initialize the bridge. + + Args: + shared_attrs: SharedAttributes instance to monitor + hub: MetadataHub instance to update + categories: Optional list of categories to forward (None = all) + """ + self.shared_attrs = shared_attrs + self.hub = hub + self.categories = set(categories) if categories else None + + # Subscribe to attribute changes + try: + self.shared_attrs.sigAttributeSet.connect(self._on_attribute_set) + logger.info("SharedAttrsMetadataBridge initialized") + except Exception as e: + logger.error(f"Failed to connect to SharedAttributes signal: {e}") + raise + + def _on_attribute_set(self, key, value): + """ + Handle SharedAttributes update signal. + + Args: + key: Tuple key from SharedAttributes + value: Raw value + """ + try: + # Validate key + if not MetadataSchema.validate_key(key): + logger.debug(f"Skipping invalid key: {key}") + return + + # Filter by category if specified + if self.categories and key[0] not in self.categories: + return + + # Normalize and forward to hub + # Hub will handle schema normalization internally + self.hub.update(key, value, source='SharedAttributes') + + except Exception as e: + logger.error(f"Error bridging attribute {key}: {e}") + + def disconnect(self): + """Disconnect from SharedAttributes signals.""" + try: + self.shared_attrs.sigAttributeSet.disconnect(self._on_attribute_set) + logger.info("SharedAttrsMetadataBridge disconnected") + except Exception as e: + logger.error(f"Error disconnecting bridge: {e}") + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
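The bridge above is the glue between the legacy `SharedAttributes` store and the new hub: every `sigAttributeSet` emission with a schema-valid tuple key is validated, normalized and mirrored into the `MetadataHub`. A wiring sketch under two assumptions: `SharedAttributes` can be constructed without arguments for illustration (in the application it is the instance behind `commChannel.sharedAttrs`), and tuple-keyed item assignment emits `sigAttributeSet`, which is the signal the bridge subscribes to.

```python
from imswitch.imcommon.model import SharedAttributes
from imswitch.imcontrol.model.metadata import MetadataHub, SharedAttrsMetadataBridge

hub = MetadataHub()
shared_attrs = SharedAttributes()  # assumed no-arg construction for this sketch

# Forward only positioner and illumination updates to the hub.
bridge = SharedAttrsMetadataBridge(
    shared_attrs=shared_attrs,
    hub=hub,
    categories=['Positioner', 'Illumination'],
)

# Assuming item assignment emits sigAttributeSet, the update below is
# validated against the schema, normalized and stored in the hub.
key = ('Illumination', 'Laser488', 'Channel0', 'PowerMw')
shared_attrs[key] = 12.5
print(hub.get(key))  # SharedAttrValue(12.5 mW @ <timestamp>)

# Detach the bridge on teardown.
bridge.disconnect()
```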
diff --git a/imswitch/imcontrol/model/writers/__init__.py b/imswitch/imcontrol/model/writers/__init__.py new file mode 100644 index 000000000..d81dc5865 --- /dev/null +++ b/imswitch/imcontrol/model/writers/__init__.py @@ -0,0 +1,32 @@ +""" +Writers package for ImSwitch - Unified I/O for all file formats. + +This package provides a common interface for writing acquisition data +in various formats (OME-TIFF, OME-Zarr, PNG, JPG, MP4, etc.). + +All writers implement the WriterBase interface and can be used from +both RecordingManager and ExperimentController. +""" + +from .base import WriterBase, SessionContext, WriterCapabilities, DetectorContext, FrameEvent +from .registry import WriterRegistry, get_writer, register_writer +from .uuid_gen import compute_content_id, compute_uuid5, generate_session_uuid +from .ome_tiff_writer import OMETiffWriter +from .ome_zarr_writer import OMEZarrWriter + +__all__ = [ + 'WriterBase', + 'SessionContext', + 'DetectorContext', + 'FrameEvent', + 'WriterCapabilities', + 'WriterRegistry', + 'get_writer', + 'register_writer', + 'compute_content_id', + 'compute_uuid5', + 'generate_session_uuid', + 'OMETiffWriter', + 'OMEZarrWriter', +] + diff --git a/imswitch/imcontrol/model/writers/base.py b/imswitch/imcontrol/model/writers/base.py new file mode 100644 index 000000000..7fa847c70 --- /dev/null +++ b/imswitch/imcontrol/model/writers/base.py @@ -0,0 +1,290 @@ +""" +Base writer interface and context dataclasses. + +Provides the common interface that all writers must implement, +along with context objects for session, detector, and frame metadata. +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Tuple +from enum import Enum +import numpy as np +import time + + +class WriterCapabilities(Enum): + """Writer capability flags.""" + SINGLE_FILE = "single_file" # All data in one file + MULTI_FILE = "multi_file" # Multiple files per session + STREAMING = "streaming" # Can write while acquiring + METADATA_RICH = "metadata_rich" # Supports full OME metadata + MULTI_DETECTOR = "multi_detector" # Supports multiple detectors + MULTI_CHANNEL = "multi_channel" # Supports multiple channels + TIME_SERIES = "time_series" # Supports time-lapse + Z_STACK = "z_stack" # Supports z-stacks + TILED = "tiled" # Supports tiled/mosaic images + + +@dataclass +class SessionContext: + """ + Session-level metadata for a recording/acquisition session. + + Contains metadata that applies to the entire session, such as + instrument configuration, user info, and global acquisition parameters. 
+ """ + session_id: str # Unique session identifier (UUID) + start_time: float = field(default_factory=time.time) + base_path: str = "" + + # User-provided metadata + project: Optional[str] = None + experiment: Optional[str] = None + sample: Optional[str] = None + user: Optional[str] = None + description: Optional[str] = None + + # Acquisition parameters + n_time_points: int = 1 + n_z_planes: int = 1 + n_channels: int = 1 + time_interval_s: Optional[float] = None + z_step_um: Optional[float] = None + + # Instrument configuration (from MetadataHub) + objectives: Dict[str, Any] = field(default_factory=dict) + light_sources: Dict[str, Any] = field(default_factory=dict) + + # Additional metadata + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + """Export as dictionary.""" + return { + 'session_id': self.session_id, + 'start_time': self.start_time, + 'base_path': self.base_path, + 'project': self.project, + 'experiment': self.experiment, + 'sample': self.sample, + 'user': self.user, + 'description': self.description, + 'n_time_points': self.n_time_points, + 'n_z_planes': self.n_z_planes, + 'n_channels': self.n_channels, + 'time_interval_s': self.time_interval_s, + 'z_step_um': self.z_step_um, + 'objectives': self.objectives, + 'light_sources': self.light_sources, + 'metadata': self.metadata, + } + + +@dataclass +class DetectorContext: + """ + Detector-specific metadata context. + + This is a simplified version that can be created from the MetadataHub + DetectorContext or used standalone by writers. + """ + name: str + shape_px: Tuple[int, int] # (width, height) + pixel_size_um: float + dtype: str = 'uint16' + + # Optional fields + fov_um: Optional[Tuple[float, float]] = None + binning: int = 1 + roi: Optional[Tuple[int, int, int, int]] = None + channel_name: Optional[str] = None + channel_color: Optional[str] = None + wavelength_nm: Optional[float] = None + exposure_ms: Optional[float] = None + gain: Optional[float] = None + + def to_dict(self) -> Dict[str, Any]: + """Export as dictionary.""" + return { + 'name': self.name, + 'shape_px': self.shape_px, + 'pixel_size_um': self.pixel_size_um, + 'dtype': self.dtype, + 'fov_um': self.fov_um, + 'binning': self.binning, + 'roi': self.roi, + 'channel_name': self.channel_name, + 'channel_color': self.channel_color, + 'wavelength_nm': self.wavelength_nm, + 'exposure_ms': self.exposure_ms, + 'gain': self.gain, + } + + +@dataclass +class FrameEvent: + """ + Per-frame metadata event. + + Captures metadata at the time of frame acquisition. + Can be created from MetadataHub FrameEvent or standalone. 
+ """ + frame_number: int + timestamp: float = field(default_factory=time.time) + detector_name: Optional[str] = None + + # Positional metadata + stage_x_um: Optional[float] = None + stage_y_um: Optional[float] = None + stage_z_um: Optional[float] = None + + # Acquisition settings + exposure_ms: Optional[float] = None + laser_power_mw: Optional[float] = None + + # Indices + t_index: int = 0 + z_index: int = 0 + c_index: int = 0 + + # Additional metadata + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + """Export as dictionary.""" + return { + 'frame_number': self.frame_number, + 'timestamp': self.timestamp, + 'detector_name': self.detector_name, + 'stage_x_um': self.stage_x_um, + 'stage_y_um': self.stage_y_um, + 'stage_z_um': self.stage_z_um, + 'exposure_ms': self.exposure_ms, + 'laser_power_mw': self.laser_power_mw, + 't_index': self.t_index, + 'z_index': self.z_index, + 'c_index': self.c_index, + 'metadata': self.metadata, + } + + +class WriterBase(ABC): + """ + Base interface for all file format writers. + + Writers implement this interface to handle writing acquisition data + in various formats. They can be used from RecordingManager, + ExperimentController, or any other acquisition pipeline. + + Lifecycle: + 1. __init__(session_ctx) - Initialize with session metadata + 2. open(detectors) - Open files/datasets for detectors + 3. write(detector, frames, events) - Write frames with metadata (called multiple times) + 4. finalize() - Flush buffers, write final metadata + 5. close() - Close files and clean up + """ + + def __init__(self, session_ctx: SessionContext): + """ + Initialize writer with session context. + + Args: + session_ctx: Session-level metadata + """ + self.session_ctx = session_ctx + self._is_open = False + self._is_finalized = False + + @abstractmethod + def open(self, detectors: Dict[str, DetectorContext]) -> None: + """ + Open files/datasets for writing. + + Args: + detectors: Dictionary mapping detector name to DetectorContext + """ + pass + + @abstractmethod + def write(self, + detector_name: str, + frames: np.ndarray, + events: Optional[List[FrameEvent]] = None) -> None: + """ + Write frames for a detector. + + Args: + detector_name: Name of the detector + frames: Image data as numpy array (can be 2D, 3D, 4D, etc.) + events: Optional list of FrameEvent objects (one per frame) + """ + pass + + @abstractmethod + def finalize(self) -> None: + """ + Finalize writing (flush buffers, write final metadata). + + Called after all frames have been written but before close(). + """ + pass + + @abstractmethod + def close(self) -> None: + """ + Close files and clean up resources. + + Should be idempotent (safe to call multiple times). + """ + pass + + @classmethod + @abstractmethod + def get_capabilities(cls) -> List[WriterCapabilities]: + """ + Return list of capabilities supported by this writer. + + Returns: + List of WriterCapabilities flags + """ + pass + + @classmethod + @abstractmethod + def get_file_extension(cls) -> str: + """ + Return the primary file extension for this writer. + + Returns: + Extension string (e.g., '.ome.tiff', '.zarr', '.png') + """ + pass + + def __enter__(self): + """Context manager support.""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager support.""" + if not self._is_finalized: + self.finalize() + self.close() + return False + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. 
+# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/writers/ome_tiff_writer.py b/imswitch/imcontrol/model/writers/ome_tiff_writer.py new file mode 100644 index 000000000..efe218f8c --- /dev/null +++ b/imswitch/imcontrol/model/writers/ome_tiff_writer.py @@ -0,0 +1,288 @@ +""" +OME-TIFF writer with full OME-XML metadata support. + +Writes acquisition data as OME-TIFF files with standards-compliant +OME-XML metadata embedded in the TIFF tags. +""" + +import os +import time +import threading +import tifffile +from collections import deque +from typing import Dict, List, Optional +import numpy as np +import logging + +from .base import WriterBase, SessionContext, DetectorContext, FrameEvent, WriterCapabilities +from .registry import register_writer +from .uuid_gen import compute_content_id + +logger = logging.getLogger(__name__) + + +@register_writer('OME_TIFF') +class OMETiffWriter(WriterBase): + """ + OME-TIFF writer with background writing and OME-XML metadata. + + Features: + - Asynchronous writing via background thread + - Full OME-XML metadata generation + - Per-frame positional metadata + - BigTIFF support for large datasets + - RGB and grayscale support + """ + + def __init__(self, session_ctx: SessionContext, + bigtiff: bool = True, + append_mode: bool = True): + """ + Initialize OME-TIFF writer. 
+ + Args: + session_ctx: Session metadata + bigtiff: Use BigTIFF format for large files + append_mode: Write all frames to single file (True) or multiple files (False) + """ + super().__init__(session_ctx) + self.bigtiff = bigtiff + self.append_mode = append_mode + self.detectors: Dict[str, DetectorContext] = {} + self.file_paths: Dict[str, str] = {} + self.queues: Dict[str, deque] = {} + self.threads: Dict[str, threading.Thread] = {} + self.stop_events: Dict[str, threading.Event] = {} + self.locks: Dict[str, threading.Lock] = {} + + @classmethod + def get_capabilities(cls) -> List[WriterCapabilities]: + return [ + WriterCapabilities.STREAMING, + WriterCapabilities.METADATA_RICH, + WriterCapabilities.MULTI_DETECTOR, + WriterCapabilities.TIME_SERIES, + WriterCapabilities.Z_STACK, + ] + + @classmethod + def get_file_extension(cls) -> str: + return '.ome.tiff' + + def open(self, detectors: Dict[str, DetectorContext]) -> None: + """Open OME-TIFF files for each detector.""" + if self._is_open: + logger.warning("Writer already open") + return + + self.detectors = detectors + + # Ensure output directory exists + if self.session_ctx.base_path: + os.makedirs(self.session_ctx.base_path, exist_ok=True) + + # Set up file paths and threads for each detector + for det_name, det_ctx in detectors.items(): + # Generate file path + filename = f"{self.session_ctx.session_id}_{det_name}.ome.tiff" + if self.session_ctx.base_path: + filepath = os.path.join(self.session_ctx.base_path, filename) + else: + filepath = filename + + self.file_paths[det_name] = filepath + self.queues[det_name] = deque() + self.locks[det_name] = threading.Lock() + self.stop_events[det_name] = threading.Event() + + # Start background writer thread + thread = threading.Thread( + target=self._writer_loop, + args=(det_name,), + daemon=True + ) + thread.start() + self.threads[det_name] = thread + + logger.info(f"Opened OME-TIFF writer for {det_name}: {filepath}") + + self._is_open = True + + def write(self, + detector_name: str, + frames: np.ndarray, + events: Optional[List[FrameEvent]] = None) -> None: + """ + Write frames for a detector. + + Args: + detector_name: Detector name + frames: Image data (2D, 3D, or 4D array) + events: Optional frame events for metadata + """ + if not self._is_open: + raise RuntimeError("Writer not open") + + if detector_name not in self.detectors: + raise ValueError(f"Unknown detector: {detector_name}") + + # Handle different frame dimensions + if frames.ndim == 2: + # Single frame + frames = frames[np.newaxis, ...] 
+ elif frames.ndim == 3: + # Stack of frames (already correct) + pass + elif frames.ndim == 4: + # Multi-channel or 3D stack - flatten to 3D + frames = frames.reshape(-1, frames.shape[-2], frames.shape[-1]) + + # Enqueue frames with metadata + for i, frame in enumerate(frames): + event = events[i] if events and i < len(events) else None + metadata = self._build_frame_metadata(detector_name, event) + + with self.locks[detector_name]: + self.queues[detector_name].append((frame, metadata)) + + def _build_frame_metadata(self, detector_name: str, event: Optional[FrameEvent]) -> Dict: + """Build OME metadata for a single frame.""" + det_ctx = self.detectors[detector_name] + + metadata = { + "Pixels": { + "PhysicalSizeX": det_ctx.pixel_size_um, + "PhysicalSizeXUnit": "µm", + "PhysicalSizeY": det_ctx.pixel_size_um, + "PhysicalSizeYUnit": "µm", + }, + } + + if event: + plane_metadata = {} + + if event.stage_x_um is not None: + plane_metadata["PositionX"] = event.stage_x_um + plane_metadata["PositionXUnit"] = "µm" + + if event.stage_y_um is not None: + plane_metadata["PositionY"] = event.stage_y_um + plane_metadata["PositionYUnit"] = "µm" + + if event.stage_z_um is not None: + plane_metadata["PositionZ"] = event.stage_z_um + plane_metadata["PositionZUnit"] = "µm" + + if event.exposure_ms is not None: + plane_metadata["ExposureTime"] = event.exposure_ms + plane_metadata["ExposureTimeUnit"] = "ms" + + if event.timestamp: + plane_metadata["DeltaT"] = event.timestamp - self.session_ctx.start_time + plane_metadata["DeltaTUnit"] = "s" + + if plane_metadata: + metadata["Plane"] = plane_metadata + + # Add session-level metadata + if self.session_ctx.project: + metadata["Project"] = self.session_ctx.project + if self.session_ctx.sample: + metadata["Sample"] = self.session_ctx.sample + if self.session_ctx.user: + metadata["User"] = self.session_ctx.user + + # Add content ID + content_id = compute_content_id({ + 'session_id': self.session_ctx.session_id, + 'detector': detector_name, + 'timestamp': time.time() + }) + metadata["ContentID"] = content_id + + return metadata + + def _writer_loop(self, detector_name: str): + """Background thread that writes frames to disk.""" + filepath = self.file_paths[detector_name] + det_ctx = self.detectors[detector_name] + + # Ensure directory exists + os.makedirs(os.path.dirname(filepath), exist_ok=True) if os.path.dirname(filepath) else None + + # Determine photometric mode + photometric = "rgb" if det_ctx.dtype in ['rgb', 'RGB'] else None + + try: + with tifffile.TiffWriter(filepath, bigtiff=self.bigtiff, append=self.append_mode) as tif: + stop_event = self.stop_events[detector_name] + + while not stop_event.is_set() or len(self.queues[detector_name]) > 0: + # Get frame from queue + with self.locks[detector_name]: + if self.queues[detector_name]: + frame, metadata = self.queues[detector_name].popleft() + else: + frame = None + + if frame is not None: + try: + # Write frame with metadata + tif.write( + data=frame, + metadata=metadata, + photometric=photometric + ) + except Exception as e: + logger.error(f"Error writing frame for {detector_name}: {e}") + else: + # Sleep briefly to avoid busy loop + time.sleep(0.01) + + except Exception as e: + logger.error(f"Error in writer loop for {detector_name}: {e}") + + def finalize(self) -> None: + """Finalize writing (signal threads to stop after queue is empty).""" + if self._is_finalized: + return + + # Signal all threads to stop + for stop_event in self.stop_events.values(): + stop_event.set() + + self._is_finalized = True + 
logger.info("OME-TIFF writer finalized") + + def close(self) -> None: + """Close files and wait for threads to finish.""" + if not self._is_open: + return + + # Wait for all threads to finish + for det_name, thread in self.threads.items(): + if thread.is_alive(): + logger.debug(f"Waiting for writer thread for {det_name}...") + thread.join(timeout=30.0) + if thread.is_alive(): + logger.warning(f"Writer thread for {det_name} did not finish in time") + + self._is_open = False + logger.info("OME-TIFF writer closed") + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/writers/ome_zarr_writer.py b/imswitch/imcontrol/model/writers/ome_zarr_writer.py new file mode 100644 index 000000000..3c4a96e98 --- /dev/null +++ b/imswitch/imcontrol/model/writers/ome_zarr_writer.py @@ -0,0 +1,134 @@ +""" +OME-Zarr (NGFF) writer with full OME-NGFF v0.4 metadata support. + +Writes acquisition data as OME-Zarr format following the OME-NGFF +specification for cloud-optimized n-dimensional bioimaging data. +""" + +import os +import zarr +from typing import Dict, List, Optional +import numpy as np +import logging + +from .base import WriterBase, SessionContext, DetectorContext, FrameEvent, WriterCapabilities +from .registry import register_writer + +logger = logging.getLogger(__name__) + + +@register_writer('OME_ZARR') +class OMEZarrWriter(WriterBase): + """ + OME-Zarr (NGFF) writer with OME-NGFF v0.4 metadata. + + Features: + - OME-NGFF v0.4 compliant metadata + - Multi-resolution pyramids (optional) + - Chunked storage for efficient access + - Support for time-series and z-stacks + - Per-plane metadata storage + + TODO: Full implementation to be extracted from experiment_controller + """ + + def __init__(self, session_ctx: SessionContext, + chunk_size: tuple = (1, 256, 256)): + """ + Initialize OME-Zarr writer. 
+ + Args: + session_ctx: Session metadata + chunk_size: Chunk size for Zarr arrays (t, y, x) + """ + super().__init__(session_ctx) + self.chunk_size = chunk_size + self.detectors: Dict[str, DetectorContext] = {} + self.zarr_groups: Dict[str, zarr.Group] = {} + self.arrays: Dict[str, zarr.Array] = {} + + @classmethod + def get_capabilities(cls) -> List[WriterCapabilities]: + return [ + WriterCapabilities.SINGLE_FILE, + WriterCapabilities.STREAMING, + WriterCapabilities.METADATA_RICH, + WriterCapabilities.MULTI_DETECTOR, + WriterCapabilities.MULTI_CHANNEL, + WriterCapabilities.TIME_SERIES, + WriterCapabilities.Z_STACK, + ] + + @classmethod + def get_file_extension(cls) -> str: + return '.zarr' + + def open(self, detectors: Dict[str, DetectorContext]) -> None: + """Open Zarr store and create arrays for each detector.""" + if self._is_open: + logger.warning("Writer already open") + return + + self.detectors = detectors + + # Create base Zarr directory + base_path = self.session_ctx.base_path + if not base_path: + base_path = f"{self.session_ctx.session_id}.zarr" + + # TODO: Full implementation + # This is a placeholder - actual implementation should: + # 1. Create Zarr store with proper OME-NGFF metadata + # 2. Set up multi-resolution pyramids + # 3. Create coordinate transformations + # 4. Add OME-XML metadata + + logger.warning("OMEZarrWriter is a placeholder - full implementation pending") + self._is_open = True + + def write(self, + detector_name: str, + frames: np.ndarray, + events: Optional[List[FrameEvent]] = None) -> None: + """Write frames to Zarr array.""" + if not self._is_open: + raise RuntimeError("Writer not open") + + # TODO: Implement frame writing + logger.debug(f"Writing {len(frames)} frames for {detector_name}") + + def finalize(self) -> None: + """Finalize Zarr metadata.""" + if self._is_finalized: + return + + # TODO: Write final OME-NGFF metadata + + self._is_finalized = True + logger.info("OME-Zarr writer finalized") + + def close(self) -> None: + """Close Zarr store.""" + if not self._is_open: + return + + # Zarr automatically closes + self._is_open = False + logger.info("OME-Zarr writer closed") + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/writers/registry.py b/imswitch/imcontrol/model/writers/registry.py new file mode 100644 index 000000000..db28e02c5 --- /dev/null +++ b/imswitch/imcontrol/model/writers/registry.py @@ -0,0 +1,106 @@ +""" +Writer registry for mapping file formats to writer implementations. + +Replaces the old DEFAULT_STORER_MAP pattern with a more flexible +registration system. +""" + +from typing import Dict, Type, Optional +from .base import WriterBase +import logging + +logger = logging.getLogger(__name__) + + +class WriterRegistry: + """ + Registry for file format writers. 
+ + Allows writers to be registered by format name and retrieved + for use by RecordingManager, ExperimentController, etc. + """ + + _writers: Dict[str, Type[WriterBase]] = {} + + @classmethod + def register(cls, format_name: str, writer_class: Type[WriterBase]): + """ + Register a writer for a format. + + Args: + format_name: Format identifier (e.g., 'OME_TIFF', 'OME_ZARR', 'PNG') + writer_class: Writer class (subclass of WriterBase) + """ + if not issubclass(writer_class, WriterBase): + raise TypeError(f"{writer_class} must be a subclass of WriterBase") + + cls._writers[format_name.upper()] = writer_class + logger.debug(f"Registered writer {writer_class.__name__} for format {format_name}") + + @classmethod + def get(cls, format_name: str) -> Optional[Type[WriterBase]]: + """ + Get writer class for a format. + + Args: + format_name: Format identifier + + Returns: + Writer class or None if not found + """ + return cls._writers.get(format_name.upper()) + + @classmethod + def list_formats(cls) -> list: + """List all registered formats.""" + return list(cls._writers.keys()) + + @classmethod + def clear(cls): + """Clear all registered writers (mainly for testing).""" + cls._writers.clear() + + +def register_writer(format_name: str): + """ + Decorator to register a writer class. + + Usage: + @register_writer('OME_TIFF') + class OMETiffWriter(WriterBase): + ... + """ + def decorator(writer_class: Type[WriterBase]): + WriterRegistry.register(format_name, writer_class) + return writer_class + return decorator + + +def get_writer(format_name: str) -> Optional[Type[WriterBase]]: + """ + Get writer class for a format. + + Args: + format_name: Format identifier + + Returns: + Writer class or None if not found + """ + return WriterRegistry.get(format_name) + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/imswitch/imcontrol/model/writers/uuid_gen.py b/imswitch/imcontrol/model/writers/uuid_gen.py new file mode 100644 index 000000000..b1b63057f --- /dev/null +++ b/imswitch/imcontrol/model/writers/uuid_gen.py @@ -0,0 +1,155 @@ +""" +UUID generation for image files with metadata-based content IDs. + +Provides deterministic UUIDs derived from metadata hashes for +tamper-evident file identification. +""" + +import hashlib +import json +import uuid +from typing import Dict, Any +from collections import OrderedDict + + +def canonicalize_metadata(metadata: Dict[str, Any]) -> bytes: + """ + Convert metadata to canonical bytes representation. + + Uses stable sorting and formatting to ensure the same metadata + always produces the same hash, regardless of dict ordering or + floating point representation variations. 
+ + Args: + metadata: Metadata dictionary + + Returns: + Canonical bytes representation + """ + def normalize_value(v): + """Normalize a value for stable hashing.""" + if isinstance(v, float): + # Format floats to fixed precision + return f"{v:.10e}" + elif isinstance(v, dict): + # Recursively sort dict keys + return OrderedDict(sorted((k, normalize_value(val)) for k, val in v.items())) + elif isinstance(v, (list, tuple)): + # Normalize lists/tuples + return [normalize_value(item) for item in v] + else: + # Return as-is for strings, ints, bools, None + return v + + # Normalize the metadata + normalized = normalize_value(metadata) + + # Convert to JSON with sorted keys + canonical_json = json.dumps( + normalized, + sort_keys=True, + separators=(',', ':'), # No whitespace + ensure_ascii=True + ) + + return canonical_json.encode('utf-8') + + +def compute_content_id(metadata: Dict[str, Any], namespace: str = "ImSwitch") -> str: + """ + Compute a deterministic content ID from metadata. + + Uses SHA-256 hash of canonical metadata to generate a UUIDv5-like + identifier. The same metadata always produces the same ID. + + Args: + metadata: Metadata dictionary + namespace: Namespace for UUID generation + + Returns: + Content ID as hex string (e.g., 'sha256:abc123...') + """ + canonical = canonicalize_metadata(metadata) + + # Compute SHA-256 hash + hash_obj = hashlib.sha256() + hash_obj.update(namespace.encode('utf-8')) + hash_obj.update(b'\x00') # Separator + hash_obj.update(canonical) + + # Return as prefixed hex string + return f"sha256:{hash_obj.hexdigest()}" + + +def compute_uuid5(metadata: Dict[str, Any], namespace_uuid: uuid.UUID = None) -> str: + """ + Compute a UUIDv5 from metadata. + + Uses the standard UUID namespace approach for compatibility + with systems that expect RFC 4122 UUIDs. + + Args: + metadata: Metadata dictionary + namespace_uuid: UUID namespace (defaults to DNS namespace) + + Returns: + UUIDv5 as string + """ + if namespace_uuid is None: + # Use DNS namespace as default + namespace_uuid = uuid.NAMESPACE_DNS + + canonical = canonicalize_metadata(metadata) + + # Generate UUIDv5 + content_uuid = uuid.uuid5(namespace_uuid, canonical.decode('utf-8')) + + return str(content_uuid) + + +def generate_session_uuid( + detector_name: str, + start_timestamp: float, + user: str = None, + project: str = None +) -> str: + """ + Generate a session UUID from key identifying metadata. + + Args: + detector_name: Detector name + start_timestamp: Session start timestamp + user: Optional user name + project: Optional project name + + Returns: + Session UUID as string + """ + metadata = { + 'detector': detector_name, + 'timestamp': start_timestamp, + } + + if user: + metadata['user'] = user + if project: + metadata['project'] = project + + return compute_uuid5(metadata) + + +# Copyright (C) 2020-2024 ImSwitch developers +# This file is part of ImSwitch. +# +# ImSwitch is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ImSwitch is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
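For context, here is a minimal usage sketch of the writer API introduced above (`base.py`, `registry.py`, `ome_tiff_writer.py`). It is not part of the patch: the output directory, detector name, and session values are illustrative, and the explicit import of `ome_tiff_writer` is only needed if the writers package `__init__` (not shown in this diff) does not already import it to trigger `@register_writer`.

```python
# Minimal sketch of the writer lifecycle: registry lookup -> open -> write -> finalize/close.
import uuid
import numpy as np

from imswitch.imcontrol.model.writers.base import (
    SessionContext, DetectorContext, FrameEvent,
)
from imswitch.imcontrol.model.writers.registry import get_writer
import imswitch.imcontrol.model.writers.ome_tiff_writer  # noqa: F401  registers 'OME_TIFF'

session = SessionContext(
    session_id=str(uuid.uuid4()),
    base_path='/tmp/imswitch_demo',   # illustrative output directory
    project='DemoProject',
    user='demo',
    n_time_points=3,
)

camera = DetectorContext(
    name='Camera1',
    shape_px=(512, 512),              # (width, height)
    pixel_size_um=6.5,
    dtype='uint16',
    exposure_ms=100.0,
)

WriterCls = get_writer('OME_TIFF')
with WriterCls(session) as writer:
    writer.open({'Camera1': camera})
    for t in range(session.n_time_points):
        # Synthetic frame in (rows, cols) order
        frame = np.random.randint(0, 2**16, size=camera.shape_px[::-1], dtype=np.uint16)
        event = FrameEvent(frame_number=t, detector_name='Camera1',
                           stage_z_um=float(t), exposure_ms=100.0, t_index=t)
        writer.write('Camera1', frame, events=[event])
    # __exit__ calls finalize() and close(), which stops and joins the background writer thread
```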
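Other formats can plug into the same registry. The sketch below registers a hypothetical in-memory `.npy` writer under the made-up format name `'NPY_STACK'`; it only illustrates the abstract interface and is not part of this change. `WriterCapabilities.SINGLE_FILE` is assumed to exist in `base.py`, as used by `OMEZarrWriter` above.

```python
# Hypothetical writer registered via the decorator; buffers frames and dumps one .npy per detector.
import os
from typing import Dict, List, Optional

import numpy as np

from imswitch.imcontrol.model.writers.base import (
    WriterBase, SessionContext, DetectorContext, FrameEvent, WriterCapabilities,
)
from imswitch.imcontrol.model.writers.registry import register_writer


@register_writer('NPY_STACK')  # made-up format name, for illustration only
class NpyStackWriter(WriterBase):
    """Toy writer: keeps frames in memory, writes one .npy stack per detector on finalize()."""

    def __init__(self, session_ctx: SessionContext):
        super().__init__(session_ctx)
        self._buffers: Dict[str, list] = {}

    @classmethod
    def get_capabilities(cls) -> List[WriterCapabilities]:
        return [WriterCapabilities.SINGLE_FILE]

    @classmethod
    def get_file_extension(cls) -> str:
        return '.npy'

    def open(self, detectors: Dict[str, DetectorContext]) -> None:
        self._buffers = {name: [] for name in detectors}
        self._is_open = True

    def write(self, detector_name: str, frames: np.ndarray,
              events: Optional[List[FrameEvent]] = None) -> None:
        if frames.ndim == 2:
            frames = frames[np.newaxis, ...]
        self._buffers[detector_name].extend(frames)  # iterate over the first axis

    def finalize(self) -> None:
        base = self.session_ctx.base_path or '.'
        os.makedirs(base, exist_ok=True)
        for name, frames in self._buffers.items():
            if frames:
                path = os.path.join(base, f'{self.session_ctx.session_id}_{name}.npy')
                np.save(path, np.stack(frames))
        self._is_finalized = True

    def close(self) -> None:
        self._buffers.clear()
        self._is_open = False
```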
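The content-ID helpers in `uuid_gen.py` are deterministic with respect to dict key ordering, which the following sketch demonstrates; the metadata values are arbitrary.

```python
# Sketch: same metadata content -> same content ID / UUID, regardless of dict ordering.
from imswitch.imcontrol.model.writers.uuid_gen import (
    compute_content_id, compute_uuid5, generate_session_uuid,
)

meta_a = {'detector': 'Camera1', 'exposure_ms': 100.0, 'stage': {'x_um': 1.0, 'y_um': 2.0}}
meta_b = {'stage': {'y_um': 2.0, 'x_um': 1.0}, 'exposure_ms': 100.0, 'detector': 'Camera1'}

# Key order does not matter: both dicts canonicalize to the same bytes
assert compute_content_id(meta_a) == compute_content_id(meta_b)
print(compute_content_id(meta_a))   # 'sha256:<64 hex chars>'
print(compute_uuid5(meta_a))        # RFC 4122 UUIDv5 string

# Session UUIDs are likewise deterministic for the same identifying metadata
sid = generate_session_uuid('Camera1', start_timestamp=1700000000.0, user='demo')
assert sid == generate_session_uuid('Camera1', start_timestamp=1700000000.0, user='demo')
```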
diff --git a/imswitch/imcontrol/view/ImConMainView.py b/imswitch/imcontrol/view/ImConMainView.py index 960e3ae1d..7103dc0a9 100644 --- a/imswitch/imcontrol/view/ImConMainView.py +++ b/imswitch/imcontrol/view/ImConMainView.py @@ -19,7 +19,6 @@ class ImConMainView(QMainWindow): - sigLoadParamsFromHDF5 = Signal() sigPickSetup = Signal() sigClosing = Signal() @@ -46,10 +45,7 @@ def __init__(self, options, viewSetupInfo, *args, **kwargs): configuration = menuBar.addMenu('&Configuration') self.shortcuts = menuBar.addMenu('&Shortcuts') - self.loadParamsAction = QtWidgets.QAction('Load parameters from saved HDF5 file…', self) - self.loadParamsAction.setShortcut('Ctrl+P') - self.loadParamsAction.triggered.connect(self.sigLoadParamsFromHDF5) - file.addAction(self.loadParamsAction) + self.pickSetupAction = QtWidgets.QAction('Pick hardware setup…', self) self.pickSetupAction.triggered.connect(self.sigPickSetup) @@ -72,7 +68,6 @@ def __init__(self, options, viewSetupInfo, *args, **kwargs): 'SLM': _DockInfo(name='SLM', yPosition=0), 'SIM': _DockInfo(name='SIM', yPosition=0), 'DPC': _DockInfo(name='DPC', yPosition=0), - 'MCT': _DockInfo(name='MCT', yPosition=0), 'Lepmon': _DockInfo(name='Lepmon', yPosition=0), 'Experiment': _DockInfo(name='Experiment', yPosition=0), 'Timelapse': _DockInfo(name='Timelapse', yPosition=0), @@ -105,7 +100,6 @@ def __init__(self, options, viewSetupInfo, *args, **kwargs): 'ULenses': _DockInfo(name='uLenses Tool', yPosition=3), 'FFT': _DockInfo(name='FFT Tool', yPosition=3), 'Holo': _DockInfo(name='Holo Tool', yPosition=3), - 'Joystick': _DockInfo(name='Joystick Tool', yPosition=3), 'Histogramm': _DockInfo(name='Histogramm Tool', yPosition=3), 'STORMRecon': _DockInfo(name='STORM Recon Tool', yPosition=2), 'HoliSheet': _DockInfo(name='HoliSheet Tool', yPosition=3), diff --git a/imswitch/imcontrol/view/widgets/JoystickWidget.py b/imswitch/imcontrol/view/widgets/JoystickWidget.py deleted file mode 100644 index 2c3d178b4..000000000 --- a/imswitch/imcontrol/view/widgets/JoystickWidget.py +++ /dev/null @@ -1,56 +0,0 @@ -from qtpy import QtCore, QtWidgets - -from imswitch.imcommon.view.guitools import joystick -from .basewidgets import NapariHybridWidget - - -class JoystickWidget(NapariHybridWidget): - """ Displays the Joystick transform of the image. """ - - sigJoystickXY = QtCore.Signal(float, float) - sigJoystickZA = QtCore.Signal(float, float) - - def __post_init__(self): - - # Add elements to GridLayout - self.grid = QtWidgets.QGridLayout() - self.setLayout(self.grid) - - # initialize the joystick - self.textEditJoystickZA = QtWidgets.QLabel("Joystick Z/A") - self.joystickZA = joystick.Joystick(callbackFct=self.getValueJoyStickXY) - - self.textEditJoystickXY = QtWidgets.QLabel("Joystick X/Y") - self.joystickXY = joystick.Joystick(callbackFct=self.getValueJoyStickAZ) - - self.grid.addWidget(self.textEditJoystickZA, 0, 1) - self.grid.addWidget(self.joystickZA, 1, 0) - self.grid.addWidget(self.textEditJoystickXY, 0, 0) - self.grid.addWidget(self.joystickXY, 1, 1) - - def getValueJoyStickXY(self, x, y): - self.sigJoystickXY.emit(x, y) - return x, y - - def getValueJoyStickAZ(self, a, z): - self.sigJoystickZA.emit(a, z) - return a, z - - - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imcontrol/view/widgets/MCTWidget.py b/imswitch/imcontrol/view/widgets/MCTWidget.py deleted file mode 100644 index d143b383e..000000000 --- a/imswitch/imcontrol/view/widgets/MCTWidget.py +++ /dev/null @@ -1,285 +0,0 @@ -from qtpy import QtCore, QtWidgets - -from imswitch.imcontrol.view import guitools -from .basewidgets import NapariHybridWidget - - -class MCTWidget(NapariHybridWidget): - """ Widget containing mct interface. """ - layer = None # napari placeholder layer - - sigMCTInitFilterPos = QtCore.Signal(bool) # (enabled) - sigMCTShowLast = QtCore.Signal(bool) # (enabled) - sigMCTStop = QtCore.Signal(bool) # (enabled) - sigMCTStart = QtCore.Signal(bool) # (enabled) - sigMCTSelectScanCoordinates = QtCore.Signal(bool) - - - sigShowToggled = QtCore.Signal(bool) # (enabled) - sigUpdateRateChanged = QtCore.Signal(float) # (rate) - - - sigSliderIllu1ValueChanged = QtCore.Signal(float) # (value) - sigSliderIllu2ValueChanged = QtCore.Signal(float) # (value) - sigSliderIllu3ValueChanged = QtCore.Signal(float) # (value) - - def __post_init__(self): - # initialize all GUI elements - mainWidget = QtWidgets.QWidget() - # Define all widgets - self.mctLabelTimePeriod = QtWidgets.QLabel('Period T (s):') - self.mctValueTimePeriod = QtWidgets.QLineEdit('5') - - self.mctLabelTimeDuration = QtWidgets.QLabel('N Measurements:') - self.mctValueTimeDuration = QtWidgets.QLineEdit('1') - - self.mctLabelZStack = QtWidgets.QLabel('Z-Stack (min,max,steps):') - self.mctValueZmin = QtWidgets.QLineEdit('-100') - self.mctValueZmax = QtWidgets.QLineEdit('100') - self.mctValueZsteps = QtWidgets.QLineEdit('10') - - self.mctLabelXScan = QtWidgets.QLabel('X Scan (min,max,steps):') - self.mctValueXmin = QtWidgets.QLineEdit('-1000') - self.mctValueXmax = QtWidgets.QLineEdit('1000') - self.mctValueXsteps = QtWidgets.QLineEdit('100') - - self.mctLabelYScan = QtWidgets.QLabel('Y Scan (min,max,steps):') - self.mctValueYmin = QtWidgets.QLineEdit('-1000') - self.mctValueYmax = QtWidgets.QLineEdit('1000') - self.mctValueYsteps = QtWidgets.QLineEdit('100') - - self.autofocusLabel = QtWidgets.QLabel('Autofocus (range, steps, every n-th measurement):') - self.autofocusRange = QtWidgets.QLineEdit('200') - self.autofocusSteps = QtWidgets.QLineEdit('20') - self.autofocusPeriod = QtWidgets.QLineEdit('10') - - self.autofocusLaser1Checkbox = QtWidgets.QCheckBox('Laser 1') - self.autofocusLaser1Checkbox.setCheckable(True) - - self.autofocusLaser2Checkbox = QtWidgets.QCheckBox('Laser 2') - self.autofocusLaser2Checkbox.setCheckable(True) - - self.autofocusLED1Checkbox = QtWidgets.QCheckBox('LED 1') - self.autofocusLED1Checkbox.setCheckable(True) - - self.autofocusSelectionLabel = QtWidgets.QLabel('Lightsource for AF:') - - valueDecimalsLaser = 1 - valueRangeLaser = (0, 2 ** 15) - tickIntervalLaser = 1 - singleStepLaser = 1 - - self.sliderIllu1, self.mctLabelIllu1 = self.setupSliderGui('Intensity (Laser 1):', valueDecimalsLaser, valueRangeLaser, tickIntervalLaser, singleStepLaser) - self.sliderIllu1.valueChanged.connect( - lambda value: self.sigSliderIllu1ValueChanged.emit(value) - ) - self.sliderIllu2, self.mctLabelIllu2 = self.setupSliderGui('Intensity (Laser 2):', 
valueDecimalsLaser, valueRangeLaser, tickIntervalLaser, singleStepLaser) - self.sliderIllu2.valueChanged.connect( - lambda value: self.sigSliderIllu2ValueChanged.emit(value) - ) - - valueDecimalsLED = 1 - valueRangeLED = (0, 2 ** 8) - tickIntervalLED = 1 - singleStepLED = 1 - - self.sliderIllu3, self.mctLabelIllu3 = self.setupSliderGui('Intensity (LED):', valueDecimalsLED, valueRangeLED, tickIntervalLED, singleStepLED) - self.sliderIllu3.valueChanged.connect( - lambda value: self.sigSliderIllu3ValueChanged.emit(value) - ) - - self.mctLabelFileName = QtWidgets.QLabel('FileName:') - self.mctEditFileName = QtWidgets.QLineEdit('MCT') - self.mctNImages = QtWidgets.QLabel('Number of images: ') - - self.mctLabelScanPositionList = QtWidgets.QLabel("Scan Position List:") - self.mctSelectScanPositionList = guitools.BetterPushButton('Select XY Coordinates') - self.mctSelectScanPositionList.setCheckable(True) - self.mctSelectScanPositionList.toggled.connect(self.sigMCTSelectScanCoordinates) - - self.mctStartButton = guitools.BetterPushButton('Start') - self.mctStartButton.setCheckable(False) - self.mctStartButton.toggled.connect(self.sigMCTStart) - - self.mctStopButton = guitools.BetterPushButton('Stop') - self.mctStopButton.setCheckable(False) - self.mctStopButton.toggled.connect(self.sigMCTStop) - - self.mctShowLastButton = guitools.BetterPushButton('Show Last') - self.mctShowLastButton.setCheckable(False) - self.mctShowLastButton.toggled.connect(self.sigMCTShowLast) - - self.mctInitFilterButton = guitools.BetterPushButton('Init Filter Pos.') - self.mctInitFilterButton.setCheckable(False) - self.mctInitFilterButton.toggled.connect(self.sigMCTInitFilterPos) - - self.mctDoZStack = QtWidgets.QCheckBox('Perform Z-Stack') - self.mctDoZStack.setCheckable(True) - - self.mctDoXYScan = QtWidgets.QCheckBox('Perform XY Scan') - self.mctDoXYScan.setCheckable(True) - - # Define layout and add widgets to it - self.grid = QtWidgets.QGridLayout() - - self.grid.addWidget(self.mctLabelTimePeriod, 0, 0, 1, 1) - self.grid.addWidget(self.mctValueTimePeriod, 0, 1, 1, 1) - self.grid.addWidget(self.mctLabelTimeDuration, 0, 2, 1, 1) - self.grid.addWidget(self.mctValueTimeDuration, 0, 3, 1, 1) - self.grid.addWidget(self.mctLabelZStack, 1, 0, 1, 1) - self.grid.addWidget(self.mctValueZmin, 1, 1, 1, 1) - self.grid.addWidget(self.mctValueZmax, 1, 2, 1, 1) - self.grid.addWidget(self.mctValueZsteps, 1, 3, 1, 1) - self.grid.addWidget(self.mctLabelXScan, 2, 0, 1, 1) - self.grid.addWidget(self.mctValueXmin, 2, 1, 1, 1) - self.grid.addWidget(self.mctValueXmax, 2, 2, 1, 1) - self.grid.addWidget(self.mctValueXsteps, 2, 3, 1, 1) - self.grid.addWidget(self.mctLabelYScan, 3, 0, 1, 1) - self.grid.addWidget(self.mctValueYmin, 3, 1, 1, 1) - self.grid.addWidget(self.mctValueYmax, 3, 2, 1, 1) - self.grid.addWidget(self.mctValueYsteps, 3, 3, 1, 1) - self.grid.addWidget(self.mctLabelIllu1, 4, 0, 1, 1) - self.grid.addWidget(self.sliderIllu1, 4, 1, 1, 3) - self.grid.addWidget(self.mctLabelIllu2, 5, 0, 1, 1) - self.grid.addWidget(self.sliderIllu2, 5, 1, 1, 3) - self.grid.addWidget(self.mctLabelIllu3, 6, 0, 1, 1) - self.grid.addWidget(self.sliderIllu3, 6, 1, 1, 3) - self.grid.addWidget(self.mctLabelFileName, 7, 0, 1, 1) - self.grid.addWidget(self.mctEditFileName, 7, 1, 1, 1) - self.grid.addWidget(self.mctNImages, 7, 2, 1, 1) - self.grid.addWidget(self.mctDoZStack, 7, 3, 1, 1) - self.grid.addWidget(self.autofocusLabel, 8, 0, 1, 1) - self.grid.addWidget(self.autofocusRange, 8, 1, 1, 1) - self.grid.addWidget(self.autofocusSteps, 8, 2, 1, 1) - 
self.grid.addWidget(self.autofocusPeriod, 8, 3, 1, 1) - self.grid.addWidget(self.autofocusSelectionLabel, 9, 0, 1, 1) - self.grid.addWidget(self.autofocusLaser1Checkbox, 9, 1, 1, 1) - self.grid.addWidget(self.autofocusLaser2Checkbox, 9, 2, 1, 1) - self.grid.addWidget(self.autofocusLED1Checkbox, 9, 3, 1, 1) - self.grid.addWidget(self.mctStartButton, 10, 0, 1, 1) - self.grid.addWidget(self.mctStopButton, 10, 1, 1, 1) - self.grid.addWidget(self.mctShowLastButton, 10, 2, 1, 1) - self.grid.addWidget(self.mctDoXYScan, 10, 3, 1, 1) - - # Create the main widget and set its layout - mainWidget.setLayout(self.grid) - - # Create the QScrollArea and set the main widget as its content - self.scrollArea = QtWidgets.QScrollArea() - self.scrollArea.setWidget(mainWidget) - self.scrollArea.setWidgetResizable(True) - - # Create the main layout and add the QScrollArea to it - mainLayout = QtWidgets.QVBoxLayout() - mainLayout.addWidget(self.scrollArea) - - # Set the main layout to the main widget - self.setLayout(mainLayout) - - def isAutofocus(self): - if self.autofocusLED1Checkbox.isChecked() or self.autofocusLaser1Checkbox.isChecked() or self.autofocusLaser2Checkbox.isChecked(): - return True - else: - return False - - def getAutofocusValues(self): - autofocusParams = {} - autofocusParams["valueRange"] = self.autofocusRange.text() - autofocusParams["valueSteps"] = self.autofocusSteps.text() - autofocusParams["valuePeriod"] = self.autofocusPeriod.text() - if self.autofocusLED1Checkbox.isChecked(): - autofocusParams["illuMethod"] = 'LED' - elif self.autofocusLaser1Checkbox.isChecked(): - autofocusParams["illuMethod"] = 'Laser1' - elif self.autofocusLaser2Checkbox.isChecked(): - autofocusParams["illuMethod"] = 'Laser2' - else: - autofocusParams["illuMethod"] = False - - return autofocusParams - - - def setupSliderGui(self, label, valueDecimals, valueRange, tickInterval, singleStep): - mctLabel = QtWidgets.QLabel(label) - valueRangeMin, valueRangeMax = valueRange - slider = guitools.FloatSlider(QtCore.Qt.Horizontal, self, allowScrollChanges=False, - decimals=valueDecimals) - slider.setFocusPolicy(QtCore.Qt.NoFocus) - slider.setMinimum(valueRangeMin) - slider.setMaximum(valueRangeMax) - slider.setTickInterval(tickInterval) - slider.setSingleStep(singleStep) - slider.setValue(0) - return slider, mctLabel - - def getImage(self): - if self.layer is not None: - return self.img.image - - def setImage(self, im, colormap="gray", name="", pixelsize=(1,1,1), translation=(0,0,0)): - if len(im.shape) == 2: - translation = (translation[0], translation[1]) - if self.layer is None or name not in self.viewer.layers: - self.layer = self.viewer.add_image(im, rgb=False, colormap=colormap, - scale=pixelsize,translate=translation, - name=name, blending='additive') - self.layer.data = im - - - def getZStackValues(self): - valueZmin = -abs(float(self.mctValueZmin.text())) - valueZmax = float(self.mctValueZmax.text()) - valueZsteps = float(self.mctValueZsteps.text()) - valueZenabled = bool(self.mctDoZStack.isChecked()) - - return valueZmin, valueZmax, valueZsteps, valueZenabled - - - def getXYScanValues(self): - valueXmin = -abs(float(self.mctValueXmin.text())) - valueXmax = float(self.mctValueXmax.text()) - valueXsteps = float(self.mctValueXsteps.text()) - - valueYmin = -abs(float(self.mctValueYmin.text())) - valueYmax = float(self.mctValueYmax.text()) - valueYsteps = float(self.mctValueYsteps.text()) - - valueXYenabled = bool(self.mctDoXYScan.isChecked()) - - return valueXmin, valueXmax, valueXsteps, valueYmin, valueYmax, 
valueYsteps, valueXYenabled - - - def getTimelapseValues(self): - mctValueTimePeriod = float(self.mctValueTimePeriod.text()) - mctValueTimeDuration = int(self.mctValueTimeDuration.text()) - return mctValueTimePeriod, mctValueTimeDuration - - def getFilename(self): - mctEditFileName = self.mctEditFileName.text() - return mctEditFileName - - def setMessageGUI(self, message): - nImages2Do = self.getTimelapseValues()[-1] - if type(message) == str: - self.mctNImages.setText(message) - else: - self.mctNImages.setText('Number of images: '+str(message+1) + " / " + str(nImages2Do)) - - - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imcontrol/view/widgets/RecordingWidget.py b/imswitch/imcontrol/view/widgets/RecordingWidget.py index 1c159ed69..c6b566e6f 100644 --- a/imswitch/imcontrol/view/widgets/RecordingWidget.py +++ b/imswitch/imcontrol/view/widgets/RecordingWidget.py @@ -96,7 +96,7 @@ def __init__(self, *args, **kwargs): self.saveFormatLabel = QtWidgets.QLabel('File format:') self.saveFormatList = QtWidgets.QComboBox() - self.saveFormatList.addItems(['TIFF', 'HDF5', 'ZARR', 'MP4']) + self.saveFormatList.addItems(['TIFF', 'ZARR', 'MP4']) self.snapSaveModeLabel = QtWidgets.QLabel('Snap save mode:') self.snapSaveModeList = QtWidgets.QComboBox() diff --git a/imswitch/imcontrol/view/widgets/__init__.py b/imswitch/imcontrol/view/widgets/__init__.py index 366dee9b5..af45545b4 100644 --- a/imswitch/imcontrol/view/widgets/__init__.py +++ b/imswitch/imcontrol/view/widgets/__init__.py @@ -9,9 +9,7 @@ from .basewidgets import WidgetFactory from .BeadRecWidget import BeadRecWidget from .ConsoleWidget import ConsoleWidget - from .EtSTEDWidget import EtSTEDWidget from .FFTWidget import FFTWidget - from .JoystickWidget import JoystickWidget from .HistogrammWidget import HistogrammWidget from .STORMReconWidget import STORMReconWidget from .HoliSheetWidget import HoliSheetWidget @@ -40,7 +38,6 @@ from .UC2ConfigWidget import UC2ConfigWidget from .SIMWidget import SIMWidget from .DPCWidget import DPCWidget - from .MCTWidget import MCTWidget from .LepmonWidget import LepmonWidget from .ExperimentWidget import ExperimentWidget from .TimelapseWidget import TimelapseWidget @@ -52,7 +49,6 @@ from .HistoScanWidget import HistoScanWidget from .WorkflowWidget import WorkflowWidget from .FlatfieldWidget import FlatfieldWidget - from .PixelCalibrationWidget import PixelCalibrationWidget from .SquidStageScanWidget import SquidStageScanWidget from .ISMWidget import ISMWidget from .SettingsWidget import SettingsWidget diff --git a/imswitch/imreconstruct/__init__.py b/imswitch/imreconstruct/__init__.py deleted file mode 100644 index 7a6359bbe..000000000 --- a/imswitch/imreconstruct/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -__imswitch_module__ = True -__title__ = 'Image Reconstruction' - - -def getMainViewAndController(moduleCommChannel, *_args, **_kwargs): - import os - 
from imswitch.imcommon.model import dirtools - os.environ['PATH'] = os.environ['PATH'] + ';' + dirtools.DataFileDirs.Libs - - from .controller import ImRecMainController - from .view import ImRecMainView - - view = ImRecMainView() - try: - controller = ImRecMainController(view, moduleCommChannel) - except Exception as e: - view.close() - raise e - - return view, controller - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/__main__.py b/imswitch/imreconstruct/__main__.py deleted file mode 100644 index 7e5d7634e..000000000 --- a/imswitch/imreconstruct/__main__.py +++ /dev/null @@ -1,32 +0,0 @@ -from imswitch import imreconstruct -from imswitch.imcommon import prepareApp, launchApp -from imswitch.imcommon.controller import ModuleCommunicationChannel - - -def main(): - app = prepareApp() - moduleCommChannel = ModuleCommunicationChannel() - moduleCommChannel.register(imreconstruct) - mainView, mainController = imreconstruct.getMainViewAndController(moduleCommChannel) - launchApp(app, mainView, [mainController]) - - -if __name__ == '__main__': - main() - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/CommunicationChannel.py b/imswitch/imreconstruct/controller/CommunicationChannel.py deleted file mode 100644 index b5dd2e2d9..000000000 --- a/imswitch/imreconstruct/controller/CommunicationChannel.py +++ /dev/null @@ -1,43 +0,0 @@ -from imswitch.imcommon.framework import Signal, SignalInterface - - -class CommunicationChannel(SignalInterface): - """ - Communication Channel is a class that handles the communication between Master Controller - and Widgets, or between Widgets. 
- """ - - sigDataFolderChanged = Signal(object) # (dataFolderPath) - - sigSaveFolderChanged = Signal(object) # (saveFolderPath) - - sigCurrentDataChanged = Signal(object) # (dataObj) - - sigScanParamsUpdated = Signal(object, bool) # (scanParDict, applyOnCurrentRecon) - - sigPatternUpdated = Signal(object) # (pattern) - - sigPatternVisibilityChanged = Signal(bool) # (visible) - - sigAddToMultiData = Signal(str, str) # (path, datasetName) - - sigReconstruct = Signal(object, bool) - - sigExecutionFinished = Signal(object) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/DataEditController.py b/imswitch/imreconstruct/controller/DataEditController.py deleted file mode 100644 index 2fb98a379..000000000 --- a/imswitch/imreconstruct/controller/DataEditController.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np - -from .basecontrollers import ImRecWidgetController - - -class DataEditController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._dataObj = None - self._meanData = None - - self._widget.sigImageSliceChanged.connect(self.setImgSlice) - self._widget.sigShowMeanClicked.connect(self.showMean) - self._widget.sigSetDarkFrameClicked.connect(self.setDarkFrame) - - def setData(self, inDataObj): - self._dataObj = inDataObj - self._meanData = np.array(np.mean(self._dataObj.data, 0), dtype=np.float32) - self.showMean() - self._widget.updateDataProperties(self._dataObj.name, self._dataObj.datasetName, - self._dataObj.numFrames) - - def setImgSlice(self, frameNumber): - if self._dataObj is None or frameNumber >= len(self._dataObj.data): - return - - self._widget.setImage(self._dataObj.data[frameNumber], autoLevels=False) - - def setDarkFrame(self): - # self.dataObj.data = self.dataObj.data[0:100] - pass - - def showMean(self): - if self._meanData is None: - return - - self._widget.setImage(self._meanData, autoLevels=True) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/controller/DataFrameController.py b/imswitch/imreconstruct/controller/DataFrameController.py deleted file mode 100644 index e76f71a5e..000000000 --- a/imswitch/imreconstruct/controller/DataFrameController.py +++ /dev/null @@ -1,112 +0,0 @@ -import numpy as np - -from .DataEditController import DataEditController -from .basecontrollers import ImRecWidgetController - - -class DataFrameController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.editWindowController = self._factory.createController( - DataEditController, self._widget.editWdw - ) - - self._dataObj = None - self._pattern = [] - self._patternGrid = [] - self._patternGridMade = False - self._patternVisible = False - - self._commChannel.sigCurrentDataChanged.connect(self.currentDataChanged) - self._commChannel.sigPatternUpdated.connect(self.patternUpdated) - self._commChannel.sigPatternVisibilityChanged.connect(self.patternVisibilityChanged) - - self._widget.sigShowMeanClicked.connect(self.showMean) - self._widget.sigAdjustDataClicked.connect(self.adjustData) - self._widget.sigUnloadDataClicked.connect(self.unloadData) - self._widget.sigFrameNumberChanged.connect(self.setImgSlice) - self._widget.sigFrameSliderChanged.connect(self.setImgSlice) - - def patternUpdated(self, pattern): - self._pattern = pattern - self._patternGridMade = False - if self._patternVisible: - self.makePatternGrid() - - def patternVisibilityChanged(self, showPattern): - self._patternVisible = showPattern - if showPattern and not self._patternGridMade: - self.makePatternGrid() - - self._widget.setShowPattern(showPattern) - - def setImgSlice(self, frame): - self._widget.setImage(self._dataObj.data[frame], autoLevels=False) - - def unloadData(self): - self._dataObj = None - self.showMean() - self._widget.setNumFrames(0) - self._widget.setDataName('') - self._widget.setDatasetName('') - - def adjustData(self): - self._logger.debug('In adjust data') - if self._dataObj is not None: - self.editWindowController.setData(self._dataObj) - self._widget.showEditWindow() - else: - self._logger.error('No data to edit') - - def showMean(self): - self._widget.setImage(self._dataObj.getMeanData(), autoLevels=True) - - def currentDataChanged(self, inDataObj): - self._dataObj = inDataObj - self._logger.debug(f'Data shape: {self._dataObj.data.shape}') - self.showMean() - self._widget.setNumFrames(self._dataObj.numFrames) - self._widget.setDataName(self._dataObj.name) - self._widget.setDatasetName(self._dataObj.datasetName) - - def makePatternGrid(self): - """ Pattern is now [Row-offset, Col-offset, Row-period, Col-period] where - offset is calculated from the upper left corner (0, 0), while the - scatter plot plots from lower left corner, so a flip has to be made - in rows.""" - numCols = np.size(self._dataObj.data, 1) - numRows = np.size(self._dataObj.data, 2) - numPointsCol = int(1 + np.floor(((numCols - 1) - self._pattern[1]) / self._pattern[3])) - numPointsRow = int(1 + np.floor(((numRows - 1) - self._pattern[0]) / self._pattern[2])) - colCoords = np.linspace(self._pattern[1], - self._pattern[1] + (numPointsCol - 1) * self._pattern[3], - numPointsCol) - rowCoords = np.linspace(self._pattern[0], - self._pattern[0] + (numPointsRow - 1) * self._pattern[2], - numPointsRow) - colCoords = np.repeat(colCoords, numPointsRow) - rowCoords = np.tile(rowCoords, numPointsCol) - - self._patternGrid = [colCoords, rowCoords] - self._widget.setPatternGridData(x=self._patternGrid[0], 
y=self._patternGrid[1]) - - self._patternGridMade = True - self._logger.debug('Made new pattern grid') - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/ImRecMainController.py b/imswitch/imreconstruct/controller/ImRecMainController.py deleted file mode 100644 index 73c57d954..000000000 --- a/imswitch/imreconstruct/controller/ImRecMainController.py +++ /dev/null @@ -1,45 +0,0 @@ -from imswitch.imcommon.controller import MainController -from .CommunicationChannel import CommunicationChannel -from .ImRecMainViewController import ImRecMainViewController -from .basecontrollers import ImRecWidgetControllerFactory - - -class ImRecMainController(MainController): - def __init__(self, mainView, moduleCommChannel): - self.__mainView = mainView - self.__moduleCommChannel = moduleCommChannel - - # Connect view signals - self.__mainView.sigClosing.connect(self.closeEvent) - - # Init communication channel and master controller - self.__commChannel = CommunicationChannel() - - # List of Controllers for the GUI Widgets - self.__factory = ImRecWidgetControllerFactory( - self.__commChannel, self.__moduleCommChannel - ) - - self.mainViewController = self.__factory.createController( - ImRecMainViewController, self.__mainView - ) - - def closeEvent(self): - self.__factory.closeAllCreatedControllers() - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/controller/ImRecMainViewController.py b/imswitch/imreconstruct/controller/ImRecMainViewController.py deleted file mode 100644 index c4395c25f..000000000 --- a/imswitch/imreconstruct/controller/ImRecMainViewController.py +++ /dev/null @@ -1,427 +0,0 @@ -import copy -import os - -import numpy as np -import tifffile as tiff -from imswitch import IS_HEADLESS - -import imswitch.imreconstruct.view.guitools as guitools -from imswitch.imcommon.controller import PickDatasetsController -from imswitch.imreconstruct.model import DataObj, ReconObj, PatternFinder, SignalExtractor -from .DataFrameController import DataFrameController -from .MultiDataFrameController import MultiDataFrameController -from .WatcherFrameController import WatcherFrameController -from .ReconstructionViewController import ReconstructionViewController -from .ScanParamsController import ScanParamsController -from .basecontrollers import ImRecWidgetController - - -class ImRecMainViewController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._commChannel.extension = self._widget.extension - - if not IS_HEADLESS: - self.dataFrameController = self._factory.createController( - DataFrameController, self._widget.dataFrame - ) - self.multiDataFrameController = self._factory.createController( - MultiDataFrameController, self._widget.multiDataFrame - ) - self.watcherFrameController = self._factory.createController( - WatcherFrameController, self._widget.watcherFrame - ) - - if not IS_HEADLESS: - self.reconstructionController = self._factory.createController( - ReconstructionViewController, self._widget.reconstructionWidget - ) - self.scanParamsController = self._factory.createController( - ScanParamsController, self._widget.scanParamsDialog - ) - self.pickDatasetsController = self._factory.createController( - PickDatasetsController, self._widget.pickDatasetsDialog - ) - - self._signalExtractor = SignalExtractor() - self._patternFinder = PatternFinder() - - self._currentDataObj = None - self._pattern = self._widget.getPatternParams() - self._settingPatternParams = False - self._scanParDict = { - 'dimensions': [self._widget.r_l_text, self._widget.u_d_text, self._widget.b_f_text, - self._widget.timepoints_text], - 'directions': [self._widget.p_text, self._widget.p_text, self._widget.p_text], - 'steps': ['35', '35', '1', '1'], - 'step_sizes': ['35', '35', '35', '1'], - 'unidirectional': True - } - self._dataFolder = None - self._saveFolder = None - - self._commChannel.sigDataFolderChanged.connect(self.dataFolderChanged) - self._commChannel.sigSaveFolderChanged.connect(self.saveFolderChanged) - self._commChannel.sigCurrentDataChanged.connect(self.currentDataChanged) - self._commChannel.sigScanParamsUpdated.connect(self.scanParamsUpdated) - self._commChannel.sigReconstruct.connect(self.reconstruct) - - - self._widget.sigSaveReconstruction.connect(lambda: self.saveCurrent('reconstruction')) - self._widget.sigSaveReconstructionAll.connect(lambda: self.saveAll('reconstruction')) - self._widget.sigSaveCoeffs.connect(lambda: self.saveCurrent('coefficients')) - self._widget.sigSaveCoeffsAll.connect(lambda: self.saveAll('coefficients')) - self._widget.sigSetDataFolder.connect(self.setDataFolder) - self._widget.sigSetSaveFolder.connect(self.setSaveFolder) - - self._widget.sigReconstuctCurrent.connect(self.reconstructCurrent) - self._widget.sigReconstructMultiConsolidated.connect( - lambda: self.reconstructMulti(consolidate=True) - ) - 
self._widget.sigReconstructMultiIndividual.connect( - lambda: self.reconstructMulti(consolidate=False) - ) - self._widget.sigQuickLoadData.connect(self.quickLoadData) - self._widget.sigUpdate.connect(lambda: self.updateScanParams(applyOnCurrentRecon=True)) - - self._widget.sigShowPatternChanged.connect(self.togglePattern) - self._widget.sigFindPattern.connect(self.findPattern) - self._widget.sigShowScanParamsClicked.connect(self.showScanParamsDialog) - self._widget.sigPatternParamsChanged.connect(self.updatePattern) - - self.updatePattern() - self.updateScanParams() - - def dataFolderChanged(self, dataFolder): - self._dataFolder = dataFolder - - def saveFolderChanged(self, saveFolder): - self._saveFolder = saveFolder - - def setDataFolder(self): - dataFolder = guitools.askForFolderPath(self._widget) - if dataFolder: - self._commChannel.sigDataFolderChanged.emit(dataFolder) - - def setSaveFolder(self): - saveFolder = guitools.askForFolderPath(self._widget) - if saveFolder: - self._commChannel.sigSaveFolderChanged.emit(saveFolder) - - def findPattern(self): - self._logger.debug('Find pattern clicked') - if self._currentDataObj is None: - return - - meanData = self._currentDataObj.getMeanData() - if len(meanData) < 1: - return - - self._logger.debug('Finding pattern') - pattern = self._patternFinder.findPattern(meanData) - self._logger.debug(f'Pattern found as: {self._pattern}') - self.setPatternParams(pattern) - self.updatePattern() - - def togglePattern(self, enabled): - self._logger.debug('Toggling pattern') - self._commChannel.sigPatternVisibilityChanged.emit(enabled) - - def updatePattern(self): - if self._settingPatternParams: - return - - self._logger.debug('Updating pattern') - self._pattern = self._widget.getPatternParams() - self._commChannel.sigPatternUpdated.emit(self._pattern) - - def setPatternParams(self, pattern): - try: - self._settingPatternParams = True - self._widget.setPatternParams(*pattern) - finally: - self._settingPatternParams = False - - def updateScanParams(self, applyOnCurrentRecon=False): - self._commChannel.sigScanParamsUpdated.emit(copy.deepcopy(self._scanParDict), - applyOnCurrentRecon) - - def scanParamsUpdated(self, scanParDict): - self._scanParDict = scanParDict - - def showScanParamsDialog(self): - self.updateScanParams() - self._widget.showScanParamsDialog() - - def quickLoadData(self): - extension = self._widget.extension.value() - if extension == 'zarr': - dataPath = guitools.askForFolderPath(self._widget, defaultFolder=self._dataFolder) - elif extension == 'hdf5': - dataPath = guitools.askForFilePath(self._widget, defaultFolder=self._dataFolder) - - if dataPath: - self._logger.debug(f'Loading data at: {dataPath}') - - datasetsInFile = DataObj.getDatasetNames(dataPath) - datasetToLoad = None - if len(datasetsInFile) < 1: - # File does not contain any datasets - return - elif len(datasetsInFile) > 1: - # File contains multiple datasets - self.pickDatasetsController.setDatasets(dataPath, datasetsInFile) - if not self._widget.showPickDatasetsDialog(blocking=True): - return - - datasetsSelected = self.pickDatasetsController.getSelectedDatasets() - if len(datasetsSelected) < 1: - # No datasets selected - return - elif len(datasetsSelected) == 1: - datasetToLoad = datasetsSelected[0] - else: - # Load into multi-data list - for datasetName in datasetsSelected: - self._commChannel.sigAddToMultiData.emit(dataPath, datasetName) - self._widget.raiseMultiDataDock() - return - - name = os.path.split(dataPath)[1] - if self._currentDataObj is not None: - 
self._currentDataObj.checkAndUnloadData() - self._currentDataObj = DataObj(name, datasetToLoad, path=dataPath) - self._currentDataObj.checkAndLoadData() - if self._currentDataObj.dataLoaded: - self._commChannel.sigCurrentDataChanged.emit(self._currentDataObj) - self._logger.debug('Data loaded') - self._widget.raiseCurrentDataDock() - else: - pass - - def currentDataChanged(self, dataObj): - self._currentDataObj = dataObj - - # Update scan params based on new data - # TODO: What if the attribute names change in imcontrol? - dimensionMap = { - b'X': self._widget.r_l_text, - b'Y': self._widget.u_d_text, - b'Z': self._widget.b_f_text - } - try: - targetsAttr = dataObj.attrs['ScanStage:target_device'] - for i in range(0, min(3, len(targetsAttr))): - self._scanParDict['dimensions'][i] = dimensionMap[targetsAttr[i]] - except KeyError: - pass - - try: - positiveDirectionAttr = dataObj.attrs['ScanStage:positive_direction'] - for i in range(0, min(3, len(positiveDirectionAttr))): - self._scanParDict['directions'][i] = ( - self._widget.p_text if positiveDirectionAttr[i] - else self._widget.n_text - ) - except KeyError: - pass - - for i in range(0, 2): - self._scanParDict['steps'][i] = str(int(np.sqrt(dataObj.numFrames))) - - try: - stepSizesAttr = dataObj.attrs['ScanStage:axis_step_size'] - except KeyError: - pass - else: - for i in range(0, min(4, len(stepSizesAttr))): - self._scanParDict['step_sizes'][i] = str(stepSizesAttr[i] * 1000) # convert um->nm - - self.updateScanParams() - - def extractData(self, data): - fwhmNm = self._widget.getFwhmNm() - bgModelling = self._widget.getBgModelling() - if bgModelling == 'Constant': - fwhmNm = np.append(fwhmNm, 9999) # Code for constant bg - elif bgModelling == 'No background': - fwhmNm = np.append(fwhmNm, 0) # Code for zero bg - elif bgModelling == 'Gaussian': - self._logger.debug('In Gaussian version') - fwhmNm = np.append(fwhmNm, self._widget.getBgGaussianSize()) - self._logger.debug('Appended to sigmas') - else: - raise ValueError(f'Invalid BG modelling "{bgModelling}" specified; must be either' - f' "Constant", "Gaussian" or "No background".') - - sigmas = np.divide(fwhmNm, 2.355 * self._widget.getPixelSizeNm()) - - device = self._widget.getComputeDevice() - pattern = self._pattern - if device == 'CPU' or device == 'GPU': - coeffs = self._signalExtractor.extractSignal(data, sigmas, pattern, device.lower()) - else: - raise ValueError(f'Invalid device "{device}" specified; must be either "CPU" or "GPU"') - - return coeffs - - def reconstructCurrent(self): - if self._currentDataObj is None: - return - - self.reconstruct([self._currentDataObj], consolidate=False) - - def reconstructMulti(self, consolidate): - self.reconstruct(self._widget.getMultiDatas(), consolidate) - - def reconstruct(self, dataObjs, consolidate): - reconObj = None - for index, dataObj in enumerate(dataObjs): - preloaded = dataObj.dataLoaded - try: - dataObj.checkAndLoadData() - - if np.prod(np.array(self._scanParDict['steps'], dtype=int)) < dataObj.numFrames: - self._logger.error('Too many frames in data') - return - - if not consolidate or index == 0: - reconObj = ReconObj(dataObj.name, - self._scanParDict, - self._widget.r_l_text, - self._widget.u_d_text, - self._widget.b_f_text, - self._widget.timepoints_text, - self._widget.p_text, - self._widget.n_text) - - data = dataObj.data - if self._widget.bleachBool.value(): - data = self.bleachingCorrection(data) - - coeffs = self.extractData(data) - finally: - if not preloaded: - dataObj.checkAndUnloadData() - - reconObj.addCoeffsTP(coeffs) 
- if not consolidate: - reconObj.updateImages() - self._widget.addNewData(reconObj, reconObj.name) - - if consolidate and reconObj is not None: - reconObj.updateImages() - self._widget.addNewData(reconObj, f'{reconObj.name}_multi') - self._commChannel.sigExecutionFinished.emit(self.reconstructionController.getImage()) - - def bleachingCorrection(self, data): - correctedData = data.copy() - energy = np.sum(data, axis=(1, 2)) - for i in range(data.shape[0]): - c = (energy[0] / energy[i]) ** 4 - correctedData[i, :, :] = data[i, :, :] * c - return correctedData - - def saveCurrent(self, dataType): - """ Saves the reconstructed image or coefficeints from the current - ReconObj to a user-specified destination. """ - - filePath = guitools.askForFilePath(self._widget, - caption=f'Save {dataType}', - defaultFolder=self._saveFolder or self._dataFolder, - nameFilter='*.tiff', isSaving=True) - - if filePath: - reconObj = self.reconstructionController.getActiveReconObj() - if dataType == 'reconstruction': - self.saveReconstruction(reconObj, filePath) - elif dataType == 'coefficients': - self.saveCoefficients(reconObj, filePath) - else: - raise ValueError(f'Invalid save data type "{dataType}"') - - def saveAll(self, dataType): - """ Saves the reconstructed image or coefficeints from all available - ReconObj objects to a user-specified directory. """ - - dirPath = guitools.askForFolderPath(self._widget, - caption=f'Save all {dataType}', - defaultFolder=self._saveFolder or self._dataFolder) - - if dirPath: - for name, reconObj in self.reconstructionController.getAllReconObjs(): - # Avoid overwriting - filePath = os.path.join(dirPath, f'{name}.{dataType}.tiff') - filePathNew = filePath - numExisting = 0 - while os.path.exists(filePathNew): - numExisting += 1 - pathWithoutExt, pathExt = os.path.splitext(filePath) - filePathNew = f'{pathWithoutExt}_{numExisting}{pathExt}' - filePath = filePathNew - - # Save - if dataType == 'reconstruction': - self.saveReconstruction(reconObj, filePath) - elif dataType == 'coefficients': - self.saveCoefficients(reconObj, filePath) - else: - raise ValueError(f'Invalid save data type "{dataType}"') - - def saveReconstruction(self, reconObj, filePath): - scanParDict = reconObj.getScanParams() - vxsizec = int(float( - scanParDict['step_sizes'][scanParDict['dimensions'].index( - self._widget.r_l_text - )] - )) - vxsizer = int(float( - scanParDict['step_sizes'][scanParDict['dimensions'].index( - self._widget.u_d_text - )] - )) - vxsizez = int(float( - reconObj.scanParDict['step_sizes'][scanParDict['dimensions'].index( - self._widget.b_f_text - )] - )) - dt = int(float( - scanParDict['step_sizes'][scanParDict['dimensions'].index( - self._widget.timepoints_text - )] - )) - - self._logger.debug(f'Trying to save to: {filePath}, Vx size: {vxsizec, vxsizer, vxsizez},' - f' dt: {dt}') - # Reconstructed image - reconstrData = copy.deepcopy(reconObj.getReconstruction()) - reconstrData = reconstrData[:, 0, :, :, :, :] - reconstrData = np.swapaxes(reconstrData, 1, 2) - tiff.imwrite(filePath, reconstrData, - imagej=True, resolution=(1 / vxsizec, 1 / vxsizer), - metadata={'spacing': vxsizez, 'unit': 'nm', 'axes': 'TZCYX'}) - - def saveCoefficients(self, reconObj, filePath): - coeffs = copy.deepcopy(reconObj.getCoeffs()) - self._logger.debug(f'Shape of coeffs: {coeffs.shape}') - coeffs = np.swapaxes(coeffs, 1, 2) - tiff.imwrite(filePath, coeffs, - imagej=True, resolution=(1, 1), - metadata={'spacing': 1, 'unit': 'px', 'axes': 'TZCYX'}) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This 
file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/MultiDataFrameController.py b/imswitch/imreconstruct/controller/MultiDataFrameController.py deleted file mode 100644 index b10aabb8a..000000000 --- a/imswitch/imreconstruct/controller/MultiDataFrameController.py +++ /dev/null @@ -1,231 +0,0 @@ -import os - -import h5py - -from imswitch.imreconstruct.model import DataObj -from .basecontrollers import ImRecWidgetController - - -class MultiDataFrameController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._loadingData = False - self._dataFolder = None - - self._moduleCommChannel.memoryRecordings.sigDataSet.connect(self.memoryDataSet) - self._moduleCommChannel.memoryRecordings.sigDataSavedToDisk.connect( - self.memoryDataSavedToDisk - ) - self._moduleCommChannel.memoryRecordings.sigDataWillRemove.connect( - self.memoryDataWillRemove - ) - self._commChannel.sigDataFolderChanged.connect(self.dataFolderChanged) - self._commChannel.sigCurrentDataChanged.connect(self.currentDataChanged) - self._commChannel.sigAddToMultiData.connect( - lambda path, datasetName: self.makeAndAddDataObj(os.path.basename(path), datasetName, - path=path) - ) - - self._widget.sigAddDataClicked.connect(self.addDataClicked) - self._widget.sigLoadCurrentDataClicked.connect(self.loadCurrData) - self._widget.sigLoadAllDataClicked.connect(self.loadAllData) - self._widget.sigUnloadCurrentDataClicked.connect(self.unloadCurrData) - self._widget.sigUnloadAllDataClicked.connect(self.unloadAllData) - self._widget.sigDeleteCurrentDataClicked.connect(self.deleteCurrData) - self._widget.sigDeleteAllDataClicked.connect(self.deleteAllData) - self._widget.sigSetAsCurrentDataClicked.connect(self.setAsCurrentData) - self._widget.sigSaveCurrentDataClicked.connect(self.saveCurrData) - self._widget.sigSaveAllDataClicked.connect(self.saveAllData) - self._widget.sigSelectedItemChanged.connect(self.updateInfo) - - self.updateInfo() - - def dataFolderChanged(self, dataFolder): - self._dataFolder = dataFolder - - def currentDataChanged(self): - if not self._loadingData: - self._widget.setAllRowsHighlighted(False) - - def memoryDataSet(self, name, vFileItem): - data = vFileItem.data - if not isinstance(data, h5py.File): - data = h5py.File(data) - - for datasetName in data.keys(): - self.makeAndAddDataObj( - name, datasetName, path=vFileItem.filePath if vFileItem.savedToDisk else None, - file=data - ) - - def memoryDataSavedToDisk(self, name, filePath): - for dataObj in self.getDataObjsByMemRecordingName(name): - dataObj.dataPath = filePath - self._widget.setDataObjMemoryFlag(dataObj, False) - self.updateInfo() - - def memoryDataWillRemove(self, name): - for dataObj in self.getDataObjsByMemRecordingName(name): - dataObj.checkAndUnloadData() - self._widget.delDataByDataObj(dataObj) - self.updateInfo() - - def getDataObjsByMemRecordingName(self, name): - for dataObj in 
self._widget.getAllDataObjs(): - try: - expectedFilename = str(self._moduleCommChannel.memoryRecordings[name].data) - except KeyError: - pass - else: - if dataObj._file is not None and dataObj._file.filename == expectedFilename: - yield dataObj - - def makeAndAddDataObj(self, name, datasetName, path=None, file=None): - dataObj = DataObj(name, datasetName, path=path, file=file) - for existingDataObj in self._widget.getAllDataObjs(): - if dataObj.describesSameAs(existingDataObj): - return # Already added - - self._widget.addDataObj(name, datasetName, dataObj) - self._widget.setDataObjMemoryFlag(dataObj, path is None) - self.updateInfo() - - def addDataClicked(self): - paths = self._widget.requestFilePathsFromUser(self._dataFolder) - for path in paths: - datasetsInFile = DataObj.getDatasetNames(path) - for datasetName in datasetsInFile: - self.makeAndAddDataObj(os.path.basename(path), datasetName, path=path) - - def loadCurrData(self): - for dataObj in self._widget.getSelectedDataObjs(): - dataObj.checkAndLoadData() - self.updateInfo() - - def loadAllData(self): - for dataObj in self._widget.getAllDataObjs(): - dataObj.checkAndLoadData() - self.updateInfo() - - def unloadCurrData(self): - for dataObj in self._widget.getSelectedDataObjs(): - if dataObj.dataPath is not None: # Don't allow unloading RAM-only data - dataObj.checkAndUnloadData() - self.updateInfo() - - def unloadAllData(self): - for dataObj in self._widget.getAllDataObjs(): - if dataObj.dataPath is not None: # Don't allow unloading RAM-only data - dataObj.checkAndUnloadData() - self.updateInfo() - - def deleteCurrData(self): - if not self._widget.requestDeleteSelectedConfirmation(): - return - - self.unloadCurrData() - for dataObj in list(self._widget.getSelectedDataObjs()): - self.deleteDataObj(dataObj) - self.updateInfo() - - def deleteAllData(self): - if not self._widget.requestDeleteAllConfirmation(): - return - - self.unloadAllData() - for dataObj in list(self._widget.getAllDataObjs()): - self.deleteDataObj(dataObj) - self.updateInfo() - - def deleteDataObj(self, dataObj): - if len(list(self.getDataObjsByMemRecordingName(dataObj.name))) == 1: - del self._moduleCommChannel.memoryRecordings[dataObj.name] - # No need to call delDataByDataObj, it will be called in memoryDataWillRemove - else: - self._widget.delDataByDataObj(dataObj) - - def saveCurrData(self): - for dataObj in self._widget.getSelectedDataObjs(): - self.saveDataObj(dataObj) - - def saveAllData(self): - for dataObj in self._widget.getAllDataObjs(): - self.saveDataObj(dataObj) - - def saveDataObj(self, dataObj): - if dataObj.dataPath is not None: - return # Can't save data already on disk - - if (os.path.exists(self._moduleCommChannel.memoryRecordings.getSavePath(dataObj.name)) and - not self._widget.requestOverwriteConfirmation(dataObj.name)): - return # File exists, user does not wish to overwrite - - self._moduleCommChannel.memoryRecordings.saveToDisk(dataObj.name) - - def setAsCurrentData(self): - try: - self._loadingData = True - selectedDataObj = self._widget.getSelectedDataObj() - selectedDataObj.checkAndLoadData() - self._commChannel.sigCurrentDataChanged.emit(selectedDataObj) - - self._widget.setAllRowsHighlighted(False) - self._widget.setCurrentRowHighlighted(True) - self.updateInfo() - finally: - self._loadingData = False - - def updateInfo(self): - selectedDataObj = self._widget.getSelectedDataObj() - if selectedDataObj is None: - self._widget.setLoadedStatusText('') - else: - if selectedDataObj.dataLoaded: - self._widget.setLoadedStatusText('Yes') - 
else: - self._widget.setLoadedStatusText('No') - - self._widget.setLoadButtonEnabled( - selectedDataObj is not None and not selectedDataObj.dataLoaded - ) - self._widget.setUnloadButtonEnabled( - selectedDataObj is not None and selectedDataObj.dataLoaded - and selectedDataObj.dataPath is not None - ) - self._widget.setSaveButtonEnabled( - selectedDataObj is not None and selectedDataObj.dataPath is None - ) - self._widget.setDeleteButtonEnabled(selectedDataObj is not None) - self._widget.setSetCurrentButtonEnabled(selectedDataObj is not None) - - allDataObjs = list(self._widget.getAllDataObjs()) - self._widget.setLoadAllButtonEnabled( - len(allDataObjs) > 0 and any([not obj.dataLoaded for obj in allDataObjs]) - ) - self._widget.setUnloadAllButtonEnabled( - len(allDataObjs) > 0 and any([obj.dataLoaded and obj.dataPath is not None - for obj in allDataObjs]) - ) - self._widget.setSaveAllButtonEnabled( - len(allDataObjs) > 0 and any([obj.dataLoaded and obj.dataPath is None - for obj in allDataObjs]) - ) - self._widget.setDeleteAllButtonEnabled(len(allDataObjs) > 0) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/ReconstructionViewController.py b/imswitch/imreconstruct/controller/ReconstructionViewController.py deleted file mode 100644 index 2c088e094..000000000 --- a/imswitch/imreconstruct/controller/ReconstructionViewController.py +++ /dev/null @@ -1,146 +0,0 @@ -import numpy as np - -from .basecontrollers import ImRecWidgetController - - -class ReconstructionViewController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._currItemInd = None - self._prevViewId = None - - self._transposeOrder = [0, 1, 2, 3, 4, 5] - self._axisStep = (0, 0, 0, 0, 0, 0) - - self._commChannel.sigScanParamsUpdated.connect(self.scanParamsUpdated) - - self._widget.sigItemSelected.connect(self.listItemChanged) - self._widget.sigAxisStepChanged.connect(self.axisStepChanged) - self._widget.sigViewChanged.connect(lambda: self.fullUpdate(levels=None)) - - def getActiveReconObj(self): - return self._widget.getCurrentItemData() - - def getAllReconObjs(self): - return self._widget.getAllItemDatas() - - def listItemChanged(self): - if self._currItemInd is not None: - currHistLevels = self._widget.getImageDisplayLevels() - prevItem = self._widget.getDataAtIndex(self._currItemInd) - prevItem.setDispLevels(currHistLevels) - - currItem = self._widget.getCurrentItemData() - retrievedLevels = \ - self._widget.getCurrentItemData().getDispLevels() if currItem is not None else None - self.fullUpdate(levels=retrievedLevels) - if retrievedLevels is not None: - self._widget.setImageDisplayLevels(retrievedLevels[0], retrievedLevels[1]) - else: - self.fullUpdate(autoLevels=True, - levels=self._widget.getCurrentItemData().getDispLevels()) - - self._currItemInd = self._widget.getCurrentItemIndex() - - def 
fullUpdate(self, autoLevels=False, levels=None): - reconObj = self._widget.getCurrentItemData() - if reconObj is not None: - self.setImgSlice(autoLevels=autoLevels, levels=levels) - if (self._currItemInd is None or self._prevViewId is None or - self.getViewId() != self._prevViewId): - self._widget.resetView() - else: - self._widget.clearImage() - - self._prevViewId = self.getViewId() - - def setImgSlice(self, autoLevels=False, levels=None): - data = self._widget.getCurrentItemData().reconstructed - - if self.getViewId() == 3: - transposeOrder = [0, 1, 2, 3, 5, 4] - elif self.getViewId() == 4: - transposeOrder = [0, 1, 2, 5, 3, 4] - else: - transposeOrder = [0, 2, 1, 5, 4, 3] - - im = data.transpose(*transposeOrder) - axisLabels = np.array(['Dataset', 'Base', 'Time point', 'Slice', 'X', 'Y'])[transposeOrder] - self._transposeOrder = transposeOrder - - self._widget.setImage(im, axisLabels) - if autoLevels: - self.updateLevelsRange() - elif levels is not None: - self._widget.setImageDisplayLevels(*levels) - - def getViewId(self): - viewName = self._widget.getViewName() - if viewName == 'standard': - return 3 - elif viewName == 'bottom': - return 4 - elif viewName == 'left': - return 5 - else: - raise ValueError(f'Unsupported view "{viewName}"') - - def axisStepChanged(self, newAxisStep): - baseAxisIndex = self._transposeOrder.index(1) - newBase = newAxisStep[baseAxisIndex] - if newBase != self._axisStep[baseAxisIndex]: - # Base changed, update levels range - self.updateLevelsRange(newBase) - - self._axisStep = newAxisStep - - def updateLevelsRange(self, base=None): - baseAxisIndex = self._transposeOrder.index(1) - if base is None: - base = self._axisStep[baseAxisIndex] - - # Find image at current base - im = self._widget.getImage() - indexForImage = [slice(None) for _ in range(len(im.shape))] - indexForImage[baseAxisIndex] = base - imAtBase = im[tuple(indexForImage)] - - # Update levels - levels = imAtBase.min(), imAtBase.max() - self._widget.setImageDisplayLevelsRange(*levels) - self._widget.setImageDisplayLevels(*levels) - - def updateRecon(self): - reconObj = self._widget.getCurrentItemData() - if reconObj is not None: - reconObj.updateImages() - self.fullUpdate(levels=None) - - def scanParamsUpdated(self, scanParDict, applyOnCurrentRecon): - if not applyOnCurrentRecon: - return - - reconObj = self._widget.getCurrentItemData() - if reconObj is not None: - reconObj.updateScanParams(scanParDict) - self.updateRecon() - - def getImage(self): - return self._widget.getImage() - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/controller/ScanParamsController.py b/imswitch/imreconstruct/controller/ScanParamsController.py deleted file mode 100644 index ead405c03..000000000 --- a/imswitch/imreconstruct/controller/ScanParamsController.py +++ /dev/null @@ -1,51 +0,0 @@ -import copy - -from .basecontrollers import ImRecWidgetController - - -class ScanParamsController(ImRecWidgetController): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self._parDict = { - 'dimensions': [self._widget.r_l_text, self._widget.u_d_text, self._widget.b_f_text, - self._widget.timepoints_text], - 'directions': [self._widget.p_text, self._widget.p_text, self._widget.p_text], - 'steps': ['35', '35', '1', '1'], - 'step_sizes': ['35', '35', '35', '1'], - 'unidirectional': True - } - - self._commChannel.sigScanParamsUpdated.connect(self.scanParamsUpdated) - self._widget.sigApplyParams.connect(self.applyParams) - - self._widget.updateValues(self._parDict) - - def scanParamsUpdated(self, parDict): - self._parDict = parDict - self._widget.updateValues(self._parDict) - - def applyParams(self): - self._parDict['dimensions'] = self._widget.getDimensions() - self._parDict['directions'] = self._widget.getDirections() - self._parDict['steps'] = self._widget.getSteps() - self._parDict['step_sizes'] = self._widget.getStepSizes() - self._parDict['unidirectional'] = self._widget.getUnidirectional() - self._commChannel.sigScanParamsUpdated.emit(copy.deepcopy(self._parDict), False) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/WatcherFrameController.py b/imswitch/imreconstruct/controller/WatcherFrameController.py deleted file mode 100644 index 772c06323..000000000 --- a/imswitch/imreconstruct/controller/WatcherFrameController.py +++ /dev/null @@ -1,132 +0,0 @@ -from imswitch.imcommon.view.guitools.FileWatcher import FileWatcher -from imswitch.imreconstruct.model import DataObj -import os -from .basecontrollers import ImRecWidgetController -from imswitch.imcommon.model.logging import initLogger -import zarr -import numpy as np - -# Fallback to ome-zarr if vanilla implementation is not available -from ome_zarr.io import parse_url -from ome_zarr.writer import write_image -from time import perf_counter -import tifffile as tiff -import h5py - - -class WatcherFrameController(ImRecWidgetController): - """ Linked to WatcherFrame. 
""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.attrs = None - self.recPath = None - self._widget.sigWatchChanged.connect(self.toggleWatch) - self._widget.sigChangeFolder.connect(lambda: self._widget.updateFileList(self._commChannel.extension.value())) - self._commChannel.sigExecutionFinished.connect(self.executionFinished) - self._commChannel.extension.sigValueChanged.connect(self.extensionChanged) - self.execution = False - self.toExecute = [] - self.current = None - self.t0 = None - self.extension = None - self.__logger = initLogger(self, tryInheritParent=False) - - def toggleWatch(self, checked): - if checked: - self.execution = False - self.extension = self._commChannel.extension.value() - rec_dir = self._widget.path + '/rec' - if not os.path.isdir(rec_dir): - os.mkdir(rec_dir) - self.watcher = FileWatcher(self._widget.path, self.extension, 1) - self._widget.updateFileList(self.extension) - files = self.watcher.filesInDirectory() - self.toExecute = files - self.watcher.sigNewFiles.connect(self.newFiles) - self.watcher.start() - self.runNextFile() - else: - self.execution = False - self.watcher.stop() - self.watcher.quit() - self.toExecute = [] - - def extensionChanged(self): - self._widget.updateFileList(self._commChannel.extension.value()) - self._widget.watchCheck.setChecked(False) - - def newFiles(self, files): - self._widget.updateFileList(self.extension) - self.toExecute.extend(files) - try: - self.runNextFile() - except OSError: - self.__logger.error("Writing in progress.") - self.watcher.removeFromList(files) - - def runNextFile(self): - if len(self.toExecute) and not self.execution: - newFile = self.toExecute.pop() - self.current = self._widget.path + '/' + newFile - self.recPath = self._widget.path + '/' + 'rec' + '/' + 'rec_' + newFile - datasets = DataObj.getDatasetNames(self.current) - dataObjs = [] - for d in datasets: - file, _ = DataObj._open(self.current, d) - dataObj = DataObj(os.path.basename(self.current), d, path=self.current, file=file) - dataObj.checkLock() - dataObjs.append(dataObj) - self.attrs = dataObj.attrs - self.execution = True - self.t0 = perf_counter() - self._commChannel.sigReconstruct.emit(dataObjs, True) - - def executionFinished(self, image): - if self.execution: - self.execution = False - self.saveImage(image) - diff = perf_counter() - self.t0 - self.watcher.addToLog(self.current, [str(self.t0), str(diff)]) - self._widget.updateFileList(self.extension) - self.runNextFile() - - def saveImage(self, image): - image = np.squeeze(image[:, 0, :, :, :, :]) - image = np.reshape(image, (1, *image.shape)) - extension = self._commChannel.extension.value() - if not os.path.exists(self.recPath): - if extension == 'zarr': - store = parse_url(self.recPath + '.tmp', mode="w").store - root = zarr.group(store=store) - root.attrs["ImSwitchData"] = self.attrs["ImSwitchData"] - write_image(image=image, group=root, axes="zyx") - store.close() - os.rename(self.recPath + '.tmp', self.recPath) - tiff.imwrite(self.recPath.split('.')[0] + ".tiff", image) - if extension == 'hdf5': - h = h5py.File(self.recPath + '.tmp', 'w') - dset = h.create_dataset('data', data=image) - self.__logger.debug(type(self.attrs)) - for k in self.attrs.keys(): - dset.attrs[k] = self.attrs[k] - h.close() - os.rename(self.recPath + '.tmp', self.recPath) - tiff.imwrite(self.recPath.split('.')[0] + ".tiff", image) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. 
-# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/controller/__init__.py b/imswitch/imreconstruct/controller/__init__.py deleted file mode 100644 index 99542be40..000000000 --- a/imswitch/imreconstruct/controller/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ImRecMainController import ImRecMainController diff --git a/imswitch/imreconstruct/controller/basecontrollers.py b/imswitch/imreconstruct/controller/basecontrollers.py deleted file mode 100644 index c70568d3f..000000000 --- a/imswitch/imreconstruct/controller/basecontrollers.py +++ /dev/null @@ -1,36 +0,0 @@ -from imswitch.imcommon.controller import WidgetController, WidgetControllerFactory - - -class ImRecWidgetControllerFactory(WidgetControllerFactory): - """ Factory class for creating a ImRecWidgetController object. """ - - def __init__(self, commChannel, moduleCommChannel): - super().__init__(commChannel=commChannel, moduleCommChannel=moduleCommChannel) - - -class ImRecWidgetController(WidgetController): - """ Superclass for all ImRecWidgetController. """ - - def __init__(self, commChannel, *args, **kwargs): - # Protected attributes, which should only be accessed from controller and its subclasses - self._commChannel = commChannel - - # Init superclass - super().__init__(*args, **kwargs) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/model/DataObj.py b/imswitch/imreconstruct/model/DataObj.py deleted file mode 100644 index 4150ac4d6..000000000 --- a/imswitch/imreconstruct/model/DataObj.py +++ /dev/null @@ -1,169 +0,0 @@ -import os - -import h5py -import numpy as np -import tifffile as tiff -import zarr - -from imswitch.imcommon.model import initLogger - - -class DataObj: - def __init__(self, name, datasetName, *, path=None, file=None): - self.__logger = initLogger(self, instanceName=f'{name}/{datasetName}') - - self.name = name - self.dataPath = path - self.darkFrame = None - self._meanData = None - self._file = file - self._data = None - self._datasetName = datasetName - self._attrs = None - self.__logger = initLogger(self, tryInheritParent=False) - - @property - def data(self): - if self._data is not None: - return self._data - - if isinstance(self._file, h5py.File): - self._data = np.array(self._file.get(self._datasetName)[:]) - elif isinstance(self._file, tiff.TiffFile): - self._data = self._file.asarray() - elif isinstance(self._file, zarr.hierarchy.Group): - self._data = np.array(self._file[self._datasetName]) - return self._data - - @property - def attrs(self): - if self._attrs is not None: - return self._attrs - - if isinstance(self._file, h5py.File): - attrs = dict(self._file.attrs) - attrs.update(dict(self._file[self.datasetName].attrs)) - self._attrs = attrs - if isinstance(self._file, zarr.hierarchy.Group): - attrs = dict(self._file.attrs) - attrs.update(dict(self._file[self.datasetName].attrs)) - self._attrs = attrs - return self._attrs - - @property - def dataLoaded(self): - return self.data is not None - - @property - def datasetName(self): - return self._datasetName - - @property - def numFrames(self): - return np.shape(self.data)[0] if self.data is not None else None - - def checkAndLoadData(self): - if not self.dataLoaded: - try: - self._file, self._datasetName = DataObj._open(self.dataPath, self._datasetName) - if self.data is not None: - self.__logger.debug('Data loaded') - except Exception: - pass - - def checkAndLoadDarkFrame(self): - pass - - def checkAndUnloadData(self): - if self._file is not None: - try: - self._file.close() - except Exception: - self.__logger.error('Error closing file') - - self._file = None - self._data = None - self._attrs = None - self._meanData = None - - def getMeanData(self): - if self._meanData is None: - self._meanData = np.array(np.mean(self.data, 0), dtype=np.float32) - - return self._meanData - - @staticmethod - def getDatasetNames(path): - file, _ = DataObj._open(path, allowMultipleDatasets=True) - try: - if isinstance(file, h5py.File) or isinstance(file, zarr.hierarchy.Group): - return list(file.keys()) - elif isinstance(file, tiff.TiffFile): - return ['default'] - else: - raise ValueError(f'Unsupported file type "{type(file).__name__}"') - finally: - if isinstance(file, h5py.File): - file.close() - - @staticmethod - def _open(path, datasetName=None, allowMultipleDatasets=False): - ext = os.path.splitext(path)[1] - if ext in ['.hdf5', '.hdf']: - file = h5py.File(path, 'r') - if len(file) < 1: - raise RuntimeError('File does not contain any datasets') - elif len(file) > 1 and datasetName is None and not allowMultipleDatasets: - raise RuntimeError('File contains multiple datasets') - - if datasetName is None and not allowMultipleDatasets: - datasetName = list(file.keys())[0] - - return file, datasetName - elif ext in ['.tiff', '.tif']: - return tiff.TiffFile(path), None - elif ext in ['.zarr']: - file = zarr.open(path, mode='r') - 
if len(file) < 1: - raise RuntimeError('File does not contain any datasets') - elif len(file) > 1 and datasetName is None and not allowMultipleDatasets: - raise RuntimeError('File contains multiple datasets') - - if datasetName is None and not allowMultipleDatasets: - datasetName = list(file.keys())[0] - - return file, datasetName - else: - raise ValueError(f'Unsupported file extension "{ext}"') - - def describesSameAs(self, other): # Don't use __eq__, that makes the class unhashable - try: - sameFile = self._file == other._file or self._file.filename == other._file.filename - except AttributeError: - sameFile = False - - return (self.name == other.name and - self.dataPath == other.dataPath and - sameFile and - self.datasetName == other.datasetName) - - def checkLock(self): - if self.attrs['writing']: - raise OSError('Writing in progress') - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/model/PatternFinder.py b/imswitch/imreconstruct/model/PatternFinder.py deleted file mode 100644 index cced65d22..000000000 --- a/imswitch/imreconstruct/model/PatternFinder.py +++ /dev/null @@ -1,117 +0,0 @@ -import numpy as np -from scipy.optimize import curve_fit -from scipy.signal import find_peaks - - -class PatternFinder: - def findPattern(self, image): - """ Finds the offsets and periods of the pattern in the image. 
""" - image = image - image.min() - thresh = image.max() / 3 - image[image < thresh] = 0 - - numRows = np.size(image, 0) - numCols = np.size(image, 1) - - meanAlongRows = image.mean(1) - meanAlongCols = image.mean(0) - - # log_ft_image[0, 0:int(numCols/2)]) - horiFft = np.fft.fft(meanAlongCols)[0:int(numCols / 2) + 1] - # log_ft_image[0:int(numRows/2), 0]) - vertFft = np.fft.fft(meanAlongRows)[0:int(numRows / 2) + 1] - - horiPeaks = find_peaks(np.log(np.abs(horiFft)), prominence=[0, np.inf], width=0, height=0) - vertPeaks = find_peaks(np.log(np.abs(vertFft)), prominence=[0, np.inf], width=0, height=0) - - bestPeakHori = self.findBestPeak(horiPeaks) - bestPeakHoriInd = horiPeaks[0][bestPeakHori] - peakWidthHori = horiPeaks[1]['widths'][bestPeakHori] - - bestPeakVert = self.findBestPeak(vertPeaks) - bestPeakVertInd = vertPeaks[0][bestPeakVert] - peakWidthVert = vertPeaks[1]['widths'][bestPeakVert] - - minRWindow = max(0, bestPeakHoriInd - int(3 * peakWidthHori)) - maxRWindow = min(bestPeakHoriInd + int(3 * peakWidthHori), len(horiFft)) - minCWindow = max(0, bestPeakVertInd - int(3 * peakWidthVert)) - maxCWindow = min(bestPeakVertInd + int(3 * peakWidthVert), len(vertFft)) - - windowR = np.arange(minRWindow, maxRWindow) - windowC = np.arange(minCWindow, maxCWindow) - - croppedPeakHori = np.abs(horiFft[windowR]) - croppedPeakVert = np.abs(vertFft[windowC]) - - def gaussFunction(x, a, b, x0, sigma): - return a + b * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) - - aStartHori = croppedPeakHori.min() - bStartHori = croppedPeakHori.max() - croppedPeakHori.min() - poptR, pcovR = curve_fit( - gaussFunction, windowR, croppedPeakHori, - p0=[aStartHori, bStartHori, bestPeakHoriInd, peakWidthHori / 2.355] - ) - - optHoriPeakInd = poptR[2] - optPerHoriPx = image.shape[1] / optHoriPeakInd - - aStartVert = croppedPeakVert.min() - bStartVert = croppedPeakVert.max() - croppedPeakVert.min() - poptC, pcovC = curve_fit( - gaussFunction, windowC, croppedPeakVert, - p0=[aStartVert, bStartVert, bestPeakVertInd, peakWidthVert / 2.355] - ) - - optVertPeakInd = poptC[2] - optPerVertPx = image.shape[0] / optVertPeakInd - - N = numCols - x = np.linspace(0, N - 1, N) - y = np.exp(-1j * 2 * np.pi * x * optHoriPeakInd / N) - ftValHori = np.multiply(meanAlongCols, y).sum() - offsetHori = np.mod((-np.angle(ftValHori) / np.pi) * 0.5 * optPerHoriPx, optPerHoriPx) - - N = numRows - x = np.linspace(0, N - 1, N) - y = np.exp(-1j * 2 * np.pi * x * optVertPeakInd / N) - ftValVert = np.multiply(meanAlongRows, y).sum() - offsetVert = np.mod((-np.angle(ftValVert) / np.pi) * 0.5 * optPerVertPx, optPerVertPx) - - return [offsetVert, offsetHori, optPerVertPx, optPerHoriPx] - - def findBestPeak(self, peaks): - """ Finds the best peak in a list of peaks. """ - bestTwoPeaks = peaks[1]['prominences'].argsort()[-2::][::-1] - prom1 = peaks[1]['prominences'][bestTwoPeaks[0]] - prom2 = peaks[1]['prominences'][bestTwoPeaks[1]] - if abs((prom1 - prom2) / (prom1 + prom2)) < 0.2: - height1 = peaks[1]['peak_heights'][bestTwoPeaks[0]] - height2 = peaks[1]['peak_heights'][bestTwoPeaks[1]] - if (height1 - height2) / (height1 - height2) < 0.2: - bestPeak = bestTwoPeaks.min() - else: - heights = np.array([height1, height2]) - highest = heights.argmax() - bestPeak = bestTwoPeaks[highest] - else: - bestPeak = bestTwoPeaks[0] - - return bestPeak - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. 
-# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/model/ReconObj.py b/imswitch/imreconstruct/model/ReconObj.py deleted file mode 100644 index ac808eed1..000000000 --- a/imswitch/imreconstruct/model/ReconObj.py +++ /dev/null @@ -1,186 +0,0 @@ -import numpy as np - -from imswitch.imcommon.model import initLogger - - -class ReconObj: - def __init__(self, name, scanParDict, r_l_text, u_d_text, b_f_text, - timepoints_text, p_text, n_text, *args, **kwargs): - super().__init__(*args, **kwargs) - self.__logger = initLogger(self, instanceName=name) - - self.r_l_text = r_l_text - self.u_d_text = u_d_text - self.b_f_text = b_f_text - self.timepoints_text = timepoints_text - self.p_text = p_text - self.n_tetx = n_text - - self.name = name - self.coeffs = None - self.reconstructed = None - self.scanParDict = scanParDict.copy() - - self.dispLevels = None - - def setDispLevels(self, levels): - self.dispLevels = levels - - def getDispLevels(self): - return self.dispLevels - - def getReconstruction(self): - return self.reconstructed - - def getCoeffs(self): - return self.coeffs - - def getScanParams(self): - return self.scanParDict - - def addCoeffsTP(self, inCoeffs): - """ Adds a set of coefficients to the existing set of coefficients. 
""" - if self.coeffs is None: - # self.__logger.debug(f'In if, shape is: {np.shape(inCoeffs)}') - # self.__logger.debug(f'Coeffs are: {inCoeffs}') - self.coeffs = np.array([inCoeffs]) - else: - # self.__logger.debug(f'In else, shape self.data is: {np.shape(inCoeffs)}') - # self.__logger.debug(f'In else, shape inCoeffs is: {np.shape(inCoeffs)}') - self.__logger.debug(f'Max in coeffs: {inCoeffs.max()}') - inCoeffs = np.expand_dims(inCoeffs, 0) - self.coeffs = np.vstack((self.coeffs, inCoeffs)) - - def updateScanParams(self, scanParDict): - self.scanParDict = scanParDict - - def updateImages(self): - """Updates the variable self.reconstructed which contains the final - reconstructed and reassigned images of ALL the bases given to the - reconstructor""" - if self.coeffs is not None: - datasets = np.shape(self.coeffs)[0] - bases = np.shape(self.coeffs)[1] - self.reconstructed = np.array([ - [self.coeffsToImage(self.coeffs[ds][b], self.scanParDict) for b in range(0, bases)] - for ds in range(0, datasets) - ]) - self.__logger.debug(f'Shape of reconstructed: {np.shape(self.reconstructed)}') - else: - self.__logger.error('Cannot update images without coefficients') - - def addGridOfCoeffs(self, im, coeffs, t, s, r0, c0, pr, pc): - # self.__logger.debug(f'Timepoint: {t}') - # self.__logger.debug(f'shape if im: {im.shape}') - # self.__logger.debug(f'shape if coeffts[i]: {coeffs.shape}') - # self.__logger.debug(f'r0: {r0}') - # self.__logger.debug(f'c0: {c0}') - # self.__logger.debug(f'pr: {pr}') - # self.__logger.debug(f'pc: {pc}') - im[t, s, r0::pr, c0::pc] = coeffs - - def coeffsToImage(self, coeffs, scanParDict): - """Takes the 4d matrix of coefficients from the signal extraction and - reshapes into images according to given parameters""" - frames = np.shape(coeffs)[0] - dim0Side = int(scanParDict['steps'][0]) - dim1Side = int(scanParDict['steps'][1]) - dim2Side = int(scanParDict['steps'][2]) - dim3Side = int(scanParDict['steps'][3]) # Always timepoints - if not frames == dim0Side * dim1Side * dim2Side * dim3Side: - self.__logger.error('Wrong dimensional data') - pass - - timepoints = int( - scanParDict['steps'][scanParDict['dimensions'].index(self.timepoints_text)] - ) - slices = int(scanParDict['steps'][scanParDict['dimensions'].index(self.b_f_text)]) - sqRows = int(scanParDict['steps'][scanParDict['dimensions'].index(self.u_d_text)]) - sqCols = int(scanParDict['steps'][scanParDict['dimensions'].index(self.r_l_text)]) - - im = np.zeros( - [timepoints, slices, sqRows * np.shape(coeffs)[1], sqCols * np.shape(coeffs)[2]], - dtype=np.float32 - ) - for i in np.arange(np.shape(coeffs)[0]): - - t = int(np.floor(i / (frames / dim3Side))) - - slow = int(np.mod(i, frames / timepoints) / (dim0Side * dim1Side)) - mid = int(np.mod(i, dim0Side * dim1Side) / dim0Side) - fast = np.mod(i, dim0Side) - - if not scanParDict['unidirectional']: - oddMidStep = np.mod(mid, 2) - fast = (1 - oddMidStep) * fast + oddMidStep * (dim1Side - 1 - fast) - - neg = (int(scanParDict['directions'][0] == 'neg'), - int(scanParDict['directions'][1] == 'neg'), - int(scanParDict['directions'][2] == 'neg')) - - """Adjust for positive or negative direction""" - fast = (1 - neg[0]) * fast + neg[0] * (dim0Side - 1 - fast) - mid = (1 - neg[1]) * mid + neg[1] * (dim1Side - 1 - mid) - slow = (1 - neg[2]) * slow + neg[2] * (dim2Side - 1 - slow) - - """Place dimensions in correct row/col/slice""" - if scanParDict['dimensions'][0] == self.r_l_text: - if scanParDict['dimensions'][1] == self.u_d_text: - c = fast - pc = dim0Side - r = mid - pr = 
dim1Side - s = slow - else: - c = fast - pc = dim0Side - r = slow - pr = dim2Side - s = mid - elif scanParDict['dimensions'][0] == self.u_d_text: - if scanParDict['dimensions'][1] == self.r_l_text: - c = mid - pc = dim1Side - r = fast - pr = dim0Side - s = slow - else: - c = slow - pc = dim2Side - r = fast - pr = dim0Side - s = mid - else: - if scanParDict['dimensions'][1] == self.r_l_text: - c = mid - pc = dim1Side - r = slow - pr = dim2Side - s = fast - else: - c = slow - pc = dim2Side - r = mid - pr = dim1Side - s = fast - - self.addGridOfCoeffs(im, coeffs[i], t, s, r, c, pr, pc) - - return im - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/model/SignalExtractor.py b/imswitch/imreconstruct/model/SignalExtractor.py deleted file mode 100644 index 232c515bb..000000000 --- a/imswitch/imreconstruct/model/SignalExtractor.py +++ /dev/null @@ -1,134 +0,0 @@ -import ctypes -import os -import time - -import numpy as np - -from imswitch.imcommon.model import dirtools, initLogger - -IS_WINDOWS = True -if os.name != 'nt': - IS_WINDOWS = False - print('This module does unfortunately currently not support non-Windows operating systems.') - -class SignalExtractor: - """ This class takes the raw data together with pre-set - parameters and recontructs and stores the final images (for the different - bases). - """ - - def __init__(self): - self.__logger = initLogger(self) - - - # TODO: Support non-Windows OS - if IS_WINDOWS: - # This is needed by the DLL containing CUDA code. 
- # ctypes.cdll.LoadLibrary(os.environ['CUDA_PATH_V9_0'] + '\\bin\\cudart64_90.dll') - ctypes.cdll.LoadLibrary( - os.path.join(dirtools.DataFileDirs.Libs, 'cudart64_90.dll') - ) - self.ReconstructionDLL = ctypes.cdll.LoadLibrary( - os.path.join(dirtools.DataFileDirs.Libs, 'GPU_acc_recon.dll') - ) - - def make3dPtrArray(self, inData): - assert len(np.shape(inData)) == 3, \ - 'Trying to make 3D ctypes.POINTER array out of non-3D data' - - data = inData - slices = data.shape[0] - - pythPtrArray = [] - - for j in range(0, slices): - ptr = data[j].ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)) - pythPtrArray.append(ptr) - cPtrArray = (ctypes.POINTER(ctypes.c_ubyte) * slices)(*pythPtrArray) - return cPtrArray - - def make4dPtrArray(self, inData): - assert len(np.shape(inData)) == 4, \ - 'Trying to make 4D ctypes.POINTER array out of non-4D data' - - data = inData - groups = data.shape[0] - slices = data.shape[1] - - pythPtrArray = [] - - for i in range(0, groups): - tempPythPtrArray = [] - for j in range(0, slices): - ptr = data[i][j].ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)) - tempPythPtrArray.append(ptr) - tempCPtrArray = (ctypes.POINTER(ctypes.c_ubyte) * slices)(*tempPythPtrArray) - pythPtrArray.append(ctypes.cast(tempCPtrArray, ctypes.POINTER(ctypes.c_ubyte))) - cPtrArray = (ctypes.POINTER(ctypes.c_ubyte) * groups)(*pythPtrArray) - - return cPtrArray - - def extractSignal(self, data, sigmas, pattern, dev): - """Extracts the signal of the data according to given parameters. - Output is a 4D matrix where first dimension is base and last three - are frame and pixel coordinates.""" - - self.__logger.debug(f'Max in data: {data.max()}') - dataPtrArray = self.make3dPtrArray(data) - p = ctypes.c_float * 4 - # Minus one due to different (1 or 0) indexing in C/Matlab - cPattern = p(pattern[0], pattern[1], pattern[2], pattern[3]) - cNumBases = ctypes.c_int(np.size(sigmas)) - self.__logger.debug(f'Sigmas: {sigmas}') - sigmas = np.array(sigmas, dtype=np.float32) - cSigmas = np.ctypeslib.as_ctypes(sigmas) # s(1, 10) - cGridRows = ctypes.c_int(0) - cGridCols = ctypes.c_int(0) - cImRows = ctypes.c_int(data.shape[1]) - cImCols = ctypes.c_int(data.shape[2]) - cImSlices = ctypes.c_int(data.shape[0]) - - self.ReconstructionDLL.calc_coeff_grid_size( - cImRows, cImCols, - ctypes.byref(cGridRows), ctypes.byref(cGridCols), - ctypes.byref(cPattern) - ) - self.__logger.debug('Coeff grid calculated') - - resCoeffs = np.zeros(dtype=np.float32, shape=(cNumBases.value, cImSlices.value, - cGridRows.value, cGridCols.value)) - resPtr = self.make4dPtrArray(resCoeffs) - t = time.time() - - if dev == 'cpu': - extractionFunction = self.ReconstructionDLL.extract_signal_CPU - elif dev == 'gpu': - extractionFunction = self.ReconstructionDLL.extract_signal_GPU - else: - raise ValueError(f'Device must be either "cpu" or "gpu"; {dev} given') - - extractionFunction(cImRows, cImCols, - cImSlices, ctypes.byref(cPattern), - cNumBases, ctypes.byref(cSigmas), - ctypes.byref(dataPtrArray), ctypes.byref(resPtr)) - - elapsed = time.time() - t - self.__logger.debug(f'Signal extraction performed in {elapsed} seconds') - return resCoeffs - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/model/__init__.py b/imswitch/imreconstruct/model/__init__.py deleted file mode 100644 index 2d2fd2ab7..000000000 --- a/imswitch/imreconstruct/model/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .DataObj import DataObj -from .PatternFinder import PatternFinder -from .ReconObj import ReconObj -from .SignalExtractor import SignalExtractor diff --git a/imswitch/imreconstruct/view/DataEditDialog.py b/imswitch/imreconstruct/view/DataEditDialog.py deleted file mode 100644 index 45e1102e6..000000000 --- a/imswitch/imreconstruct/view/DataEditDialog.py +++ /dev/null @@ -1,134 +0,0 @@ -import pyqtgraph as pg -from qtpy import QtCore, QtWidgets - -from .guitools import BetterPushButton - - -class DataEditDialog(QtWidgets.QDialog): - """For future data editing window, for example to remove rearrange frames - or devide into seperate datasets""" - - sigImageSliceChanged = QtCore.Signal(int) - sigShowMeanClicked = QtCore.Signal() - sigSetDarkFrameClicked = QtCore.Signal() - - def __init__(self, parent, *args, **kwargs): - super().__init__(parent, *args, **kwargs) - - self.setWindowTitle('Data Edit/Complement') - - # Data view Widget - imageWidget = pg.GraphicsLayoutWidget() - self.imgVb = imageWidget.addViewBox(row=0, col=0) - self.imgVb.setMouseMode(pg.ViewBox.PanMode) - self.img = pg.ImageItem(axisOrder='row-major') - self.img.setTransform(self.img.transform().translate(-0.5, -0.5)) - self.imgVb.addItem(self.img) - self.imgVb.setAspectLocked(True) - self.imgHist = pg.HistogramLUTItem(image=self.img) - imageWidget.addItem(self.imgHist, row=0, col=1) - - self.showMeanBtn = BetterPushButton() - self.showMeanBtn.setText('Show mean image') - self.showMeanBtn.pressed.connect(self.sigShowMeanClicked) - - frameLabel = QtWidgets.QLabel('Frame # ') - self.frameNum = QtWidgets.QLineEdit('0') - self.frameNum.textChanged.connect(self.frameNumberChanged) - self.frameNum.setFixedWidth(45) - - self.dataName = QtWidgets.QLabel('File:') - self.datasetName = QtWidgets.QLabel('Dataset:') - numFramesLabel = QtWidgets.QLabel('No. 
frames:') - self.numFrames = QtWidgets.QLabel('') - - self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self) - self.slider.setMinimum(0) - self.slider.setMaximum(0) - self.slider.setTickInterval(5) - self.slider.setSingleStep(1) - self.slider.valueChanged[int].connect(self.sliderMoved) - - self.actionBtns = DataEditActions() - self.actionBtns.sigSetDarkFrame.connect(self.sigSetDarkFrameClicked) - - # Dark frame view widget - dfWidget = pg.GraphicsLayoutWidget() - self.dfVb = dfWidget.addViewBox(row=0, col=0) - self.dfVb.setMouseMode(pg.ViewBox.PanMode) - self.df = pg.ImageItem(axisOrder='row-major') - self.df.setTransform(self.df.transform().translate(-0.5, -0.5)) - self.dfVb.addItem(self.df) - self.dfVb.setAspectLocked(True) - self.dfHist = pg.HistogramLUTItem(image=self.df) - dfWidget.addItem(self.dfHist, row=0, col=1) - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(self.dataName, 0, 0, 1, 4) - layout.addWidget(self.datasetName, 0, 4, 1, 2) - layout.addWidget(self.showMeanBtn, 1, 0) - layout.addWidget(self.slider, 1, 1, 1, 3) - layout.addWidget(frameLabel, 1, 4) - layout.addWidget(self.frameNum, 1, 5) - layout.addWidget(numFramesLabel, 2, 4) - layout.addWidget(self.numFrames, 2, 5) - layout.addWidget(imageWidget, 3, 0, 1, 6) - layout.addWidget(self.actionBtns, 0, 6) - layout.addWidget(dfWidget, 0, 7, -1, 1) - - def sliderMoved(self): - frameNumber = self.slider.value() - self.frameNum.setText(str(frameNumber)) - self.sigImageSliceChanged.emit(frameNumber) - - def frameNumberChanged(self): - try: - frameNumber = int(self.frameNum.text()) - except TypeError: - return - - self.slider.setValue(frameNumber) - self.sigImageSliceChanged.emit(frameNumber) - - def setImage(self, image, autoLevels): - self.img.setImage(image, autoLevels=autoLevels) - - def updateDataProperties(self, dataName, datasetName, numFrames): - self.dataName.setText(f'File: {dataName}') - self.datasetName.setText(f'Dataset: {datasetName}') - self.numFrames.setText(str(numFrames)) - self.slider.setMaximum(numFrames - 1) - - -class DataEditActions(QtWidgets.QFrame): - sigSetDarkFrame = QtCore.Signal() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - setDarkFrameBtn = BetterPushButton('Set Dark/Offset frame') - setDarkFrameBtn.clicked.connect(self.sigSetDarkFrame) - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(setDarkFrameBtn, 0, 0) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/view/DataFrame.py b/imswitch/imreconstruct/view/DataFrame.py deleted file mode 100644 index 0eac96a88..000000000 --- a/imswitch/imreconstruct/view/DataFrame.py +++ /dev/null @@ -1,138 +0,0 @@ -import pyqtgraph as pg -from qtpy import QtCore, QtWidgets - -from .DataEditDialog import DataEditDialog -from .guitools import BetterPushButton - - -class DataFrame(QtWidgets.QFrame): - """Frame for showing and examining the raw data""" - - # Signals - sigShowMeanClicked = QtCore.Signal() - sigAdjustDataClicked = QtCore.Signal() - sigUnloadDataClicked = QtCore.Signal() - sigFrameNumberChanged = QtCore.Signal(int) - sigFrameSliderChanged = QtCore.Signal(int) - - # Methods - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # Image Widget - imageWidget = pg.GraphicsLayoutWidget() - self.imgVb = imageWidget.addViewBox(row=0, col=0) - self.imgVb.setMouseMode(pg.ViewBox.PanMode) - self.img = pg.ImageItem(axisOrder='row-major') - self.img.setTransform(self.img.transform().translate(-0.5, -0.5)) - self.imgVb.addItem(self.img) - self.imgVb.setAspectLocked(True) - self.imgHist = pg.HistogramLUTItem(image=self.img) - imageWidget.addItem(self.imgHist, row=0, col=1) - - self.showMeanBtn = BetterPushButton() - self.showMeanBtn.setText('Show mean image') - self.showMeanBtn.clicked.connect(self.sigShowMeanClicked) - - self.adjustDataBtn = BetterPushButton() - self.adjustDataBtn.setText('Adjust/compl. data') - self.adjustDataBtn.clicked.connect(self.sigAdjustDataClicked) - - self.unloadDataBtn = BetterPushButton() - self.unloadDataBtn.setText('Unload data') - self.unloadDataBtn.clicked.connect(self.sigUnloadDataClicked) - - frameLabel = QtWidgets.QLabel('Frame # ') - self.frameNum = QtWidgets.QLineEdit('0') - self.frameNum.textChanged.connect(lambda x: (x.isdigit() and - self.setCurrentFrame(int(x)) and - self.sigFrameNumberChanged.emit(int(x)))) - self.frameNum.setFixedWidth(45) - - self.dataName = QtWidgets.QLabel('File:') - self.datasetName = QtWidgets.QLabel('Dataset:') - numFramesLabel = QtWidgets.QLabel('No. 
frames:') - self.numFrames = QtWidgets.QLabel('') - - self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self) - self.slider.setMinimum(0) - self.slider.setMaximum(0) - self.slider.setTickInterval(5) - self.slider.setSingleStep(1) - self.slider.valueChanged[int].connect(self.setCurrentFrame) - self.slider.valueChanged[int].connect(self.sigFrameSliderChanged) - - self.patternScatter = pg.ScatterPlotItem() - self.patternScatter.setData( - pos=[[0, 0], [10, 10], [20, 20], [30, 30], [40, 40]], - pen=pg.mkPen(color=(255, 0, 0), width=0.5, - style=QtCore.Qt.SolidLine, antialias=True), - brush=pg.mkBrush(color=(255, 0, 0), antialias=True), size=1, - pxMode=False) - - self.editWdw = DataEditDialog(self) - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(self.dataName, 0, 0, 1, 4) - layout.addWidget(self.datasetName, 0, 4, 1, 2) - layout.addWidget(self.showMeanBtn, 1, 0) - layout.addWidget(self.slider, 1, 1, 1, 3) - layout.addWidget(frameLabel, 1, 4) - layout.addWidget(self.frameNum, 1, 5) - layout.addWidget(self.adjustDataBtn, 2, 0) - layout.addWidget(self.unloadDataBtn, 2, 1) - layout.addWidget(numFramesLabel, 2, 4) - layout.addWidget(self.numFrames, 2, 5) - layout.addWidget(imageWidget, 3, 0, 1, -1) - - self._showPattern = False - - def setShowPattern(self, value): - self._showPattern = value - - if value: - self.imgVb.addItem(self.patternScatter) - else: - self.imgVb.removeItem(self.patternScatter) - - def showEditWindow(self): - self.editWdw.show() - - def setImage(self, im, autoLevels): - self.img.setImage(im, autoLevels) - - def setPatternGridData(self, x, y): - self.patternScatter.setData(x, y) - - def setCurrentFrame(self, value): - self.frameNum.setText(str(value)) - self.slider.setValue(value) - - def setNumFrames(self, value): - self.numFrames.setText(str(value)) - self.slider.setMaximum(value - 1 if value > 0 else 0) - - def setDataName(self, value): - self.dataName.setText(f'File: {value}') - - def setDatasetName(self, value): - self.datasetName.setText(f'Dataset: {value}') - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/view/ImRecMainView.py b/imswitch/imreconstruct/view/ImRecMainView.py deleted file mode 100644 index 18d3bd47d..000000000 --- a/imswitch/imreconstruct/view/ImRecMainView.py +++ /dev/null @@ -1,321 +0,0 @@ -import numpy as np -import pyqtgraph as pg -from imswitch import IS_HEADLESS -from pyqtgraph.dockarea import Dock, DockArea -from pyqtgraph.parametertree import Parameter, ParameterTree -from qtpy import QtCore, QtWidgets - -from imswitch.imcommon.view import PickDatasetsDialog -from .DataFrame import DataFrame -from .MultiDataFrame import MultiDataFrame -from .WatcherFrame import WatcherFrame -from .ReconstructionView import ReconstructionView -from .ScanParamsDialog import ScanParamsDialog -from .guitools import BetterPushButton - - -class ImRecMainView(QtWidgets.QMainWindow): - sigSaveReconstruction = QtCore.Signal() - sigSaveReconstructionAll = QtCore.Signal() - sigSaveCoeffs = QtCore.Signal() - sigSaveCoeffsAll = QtCore.Signal() - sigSetDataFolder = QtCore.Signal() - sigSetSaveFolder = QtCore.Signal() - - sigReconstuctCurrent = QtCore.Signal() - sigReconstructMultiConsolidated = QtCore.Signal() - sigReconstructMultiIndividual = QtCore.Signal() - sigQuickLoadData = QtCore.Signal() - sigUpdate = QtCore.Signal() - - sigShowPatternChanged = QtCore.Signal(bool) - sigFindPattern = QtCore.Signal() - sigShowScanParamsClicked = QtCore.Signal() - sigPatternParamsChanged = QtCore.Signal() - - sigClosing = QtCore.Signal() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.setWindowTitle('Image Reconstruction') - - # self parameters - self.r_l_text = 'Right/Left' - self.u_d_text = 'Up/Down' - self.b_f_text = 'Back/Forth' - self.timepoints_text = 'Timepoints' - self.p_text = 'pos' - self.n_text = 'neg' - - # Actions in menubar - if not IS_HEADLESS: - menuBar = self.menuBar() - file = menuBar.addMenu('&File') - - quickLoadAction = QtWidgets.QAction('Quick load data…', self) - quickLoadAction.setShortcut('Ctrl+T') - quickLoadAction.triggered.connect(self.sigQuickLoadData) - file.addAction(quickLoadAction) - - file.addSeparator() - - - saveReconAction = QtWidgets.QAction('Save reconstruction…', self) - saveReconAction.setShortcut('Ctrl+D') - saveReconAction.triggered.connect(self.sigSaveReconstruction) - file.addAction(saveReconAction) - saveReconAllAction = QtWidgets.QAction('Save all reconstructions…', self) - saveReconAllAction.setShortcut('Ctrl+Shift+D') - saveReconAllAction.triggered.connect(self.sigSaveReconstructionAll) - file.addAction(saveReconAllAction) - saveCoeffsAction = QtWidgets.QAction('Save coefficients of reconstruction…', self) - saveCoeffsAction.setShortcut('Ctrl+A') - saveCoeffsAction.triggered.connect(self.sigSaveCoeffs) - file.addAction(saveCoeffsAction) - saveCoeffsAllAction = QtWidgets.QAction('Save all coefficients…', self) - saveCoeffsAllAction.setShortcut('Ctrl+Shift+A') - saveCoeffsAllAction.triggered.connect(self.sigSaveCoeffsAll) - file.addAction(saveCoeffsAllAction) - - file.addSeparator() - - setDataFolder = QtWidgets.QAction('Set default data folder…', self) - setDataFolder.triggered.connect(self.sigSetDataFolder) - file.addAction(setDataFolder) - - setSaveFolder = QtWidgets.QAction('Set default save folder…', self) - setSaveFolder.triggered.connect(self.sigSetSaveFolder) - file.addAction(setSaveFolder) - - self.dataFrame = DataFrame() - self.multiDataFrame = MultiDataFrame() - self.watcherFrame = WatcherFrame() - - btnFrame = BtnFrame() - 
btnFrame.sigReconstuctCurrent.connect(self.sigReconstuctCurrent) - btnFrame.sigReconstructMultiConsolidated.connect(self.sigReconstructMultiConsolidated) - btnFrame.sigReconstructMultiIndividual.connect(self.sigReconstructMultiIndividual) - btnFrame.sigQuickLoadData.connect(self.sigQuickLoadData) - btnFrame.sigUpdate.connect(self.sigUpdate) - if not IS_HEADLESS: - self.reconstructionWidget = ReconstructionView() - - self.parTree = ReconParTree() - self.showPatBool = self.parTree.p.param('Show pattern') - self.showPatBool.sigValueChanged.connect(lambda _, v: self.sigShowPatternChanged.emit(v)) - self.bleachBool = self.parTree.p.param('Bleaching correction') - self.extension = self.parTree.p.param('File extension') - self.findPatBtn = self.parTree.p.param('Pattern').param('Find pattern') - self.findPatBtn.sigActivated.connect(self.sigFindPattern) - self.scanParWinBtn = self.parTree.p.param('Scanning parameters') - self.scanParWinBtn.sigActivated.connect(self.sigShowScanParamsClicked) - self.parTree.p.param('Pattern').sigTreeStateChanged.connect(self.sigPatternParamsChanged) - - self.scanParamsDialog = ScanParamsDialog( - self, self.r_l_text, self.u_d_text, self.b_f_text, - self.timepoints_text, self.p_text, self.n_text - ) - - self.pickDatasetsDialog = PickDatasetsDialog(self, allowMultiSelect=True) - - parameterFrame = QtWidgets.QFrame() - parameterGrid = QtWidgets.QGridLayout() - parameterFrame.setLayout(parameterGrid) - parameterGrid.addWidget(self.parTree, 0, 0) - - DataDock = DockArea() - - self.watcherDock = Dock('File watcher') - self.watcherDock.addWidget(self.watcherFrame) - DataDock.addDock(self.watcherDock) - - self.multiDataDock = Dock('Multidata management') - self.multiDataDock.addWidget(self.multiDataFrame) - DataDock.addDock(self.multiDataDock, 'above', self.watcherDock) - - self.currentDataDock = Dock('Current data') - self.currentDataDock.addWidget(self.dataFrame) - DataDock.addDock(self.currentDataDock, 'above', self.multiDataDock) - - layout = QtWidgets.QHBoxLayout() - self.cwidget = QtWidgets.QWidget() - self.setCentralWidget(self.cwidget) - self.cwidget.setLayout(layout) - - leftContainer = QtWidgets.QVBoxLayout() - leftContainer.setContentsMargins(0, 0, 0, 0) - - rightContainer = QtWidgets.QVBoxLayout() - rightContainer.setContentsMargins(0, 0, 0, 0) - - leftContainer.addWidget(parameterFrame, 1) - leftContainer.addWidget(btnFrame, 0) - leftContainer.addWidget(DataDock, 1) - if not IS_HEADLESS: - rightContainer.addWidget(self.reconstructionWidget) - - layout.addLayout(leftContainer, 1) - layout.addLayout(rightContainer, 3) - - pg.setConfigOption('imageAxisOrder', 'row-major') - - def requestFilePathFromUser(self, caption=None, defaultFolder=None, nameFilter=None, - isSaving=False): - func = (QtWidgets.QFileDialog().getOpenFileName if not isSaving - else QtWidgets.QFileDialog().getSaveFileName) - - return func(self, caption=caption, directory=defaultFolder, filter=nameFilter)[0] - - def requestFolderPathFromUser(self, caption=None, defaultFolder=None): - return QtWidgets.QFileDialog.getExistingDirectory(caption=caption, directory=defaultFolder) - - def raiseCurrentDataDock(self): - self.currentDataDock.raiseDock() - - def raiseMultiDataDock(self): - self.multiDataDock.raiseDock() - - def addNewData(self, reconObj, name): - self.reconstructionWidget.addNewData(reconObj, name) - - def getMultiDatas(self): - dataList = self.multiDataFrame.dataList - for i in range(dataList.count()): - yield dataList.item(i).data(1) - - def showScanParamsDialog(self, blocking=False): - if 
blocking: - result = self.scanParamsDialog.exec_() - return result == QtWidgets.QDialog.Accepted - else: - self.scanParamsDialog.show() - - def showPickDatasetsDialog(self, blocking=False): - if blocking: - result = self.pickDatasetsDialog.exec_() - return result == QtWidgets.QDialog.Accepted - else: - self.pickDatasetsDialog.show() - - def getPatternParams(self): - patternPars = self.parTree.p.param('Pattern') - return (np.mod(patternPars.param('Row-offset').value(), - patternPars.param('Row-period').value()), - np.mod(patternPars.param('Col-offset').value(), - patternPars.param('Col-period').value()), - patternPars.param('Row-period').value(), - patternPars.param('Col-period').value()) - - def setPatternParams(self, rowOffset, colOffset, rowPeriod, colPeriod): - patternPars = self.parTree.p.param('Pattern') - patternPars.param('Row-offset').setValue(rowOffset) - patternPars.param('Col-offset').setValue(colOffset) - patternPars.param('Row-period').setValue(rowPeriod) - patternPars.param('Col-period').setValue(colPeriod) - - def getComputeDevice(self): - return self.parTree.p.param('CPU/GPU').value() - - def getPixelSizeNm(self): - return self.parTree.p.param('Pixel size').value() - - def getFwhmNm(self): - return self.parTree.p.param('Reconstruction options').param('PSF FWHM').value() - - def getBgModelling(self): - return self.parTree.p.param('Reconstruction options').param('BG modelling').value() - - def getBgGaussianSize(self): - return self.parTree.p.param('Reconstruction options').param('BG modelling') \ - .param('BG Gaussian size').value() - - def closeEvent(self, event): - self.sigClosing.emit() - event.accept() - - -class ReconParTree(ParameterTree): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # Parameter tree for the reconstruction - params = [ - {'name': 'Pixel size', 'type': 'float', 'value': 65, 'suffix': 'nm'}, - {'name': 'CPU/GPU', 'type': 'list', 'values': ['GPU', 'CPU']}, - {'name': 'Pattern', 'type': 'group', 'children': [ - {'name': 'Row-offset', 'type': 'float', 'value': 9.89, 'limits': (0, 9999)}, - {'name': 'Col-offset', 'type': 'float', 'value': 10.4, 'limits': (0, 9999)}, - {'name': 'Row-period', 'type': 'float', 'value': 11.05, 'limits': (0, 9999)}, - {'name': 'Col-period', 'type': 'float', 'value': 11.05, 'limits': (0, 9999)}, - {'name': 'Find pattern', 'type': 'action'}]}, - {'name': 'Reconstruction options', 'type': 'group', 'children': [ - {'name': 'PSF FWHM', 'type': 'float', 'value': 220, 'limits': (0, 9999), - 'suffix': 'nm'}, - {'name': 'BG modelling', 'type': 'list', - 'values': ['Constant', 'Gaussian', 'No background'], 'children': [ - {'name': 'BG Gaussian size', 'type': 'float', 'value': 500, 'suffix': 'nm'}]}]}, - {'name': 'Scanning parameters', 'type': 'action'}, - {'name': 'Show pattern', 'type': 'bool'}, - {'name': 'Bleaching correction', 'type': 'bool'}, - {'name': 'File extension', 'type': 'list', 'values': ['hdf5', 'zarr']}] - - self.p = Parameter.create(name='params', type='group', children=params) - self.setParameters(self.p, showTop=False) - self._writable = True - - -class BtnFrame(QtWidgets.QFrame): - sigReconstuctCurrent = QtCore.Signal() - sigReconstructMultiConsolidated = QtCore.Signal() - sigReconstructMultiIndividual = QtCore.Signal() - sigQuickLoadData = QtCore.Signal() - sigUpdate = QtCore.Signal() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.reconCurrBtn = BetterPushButton('Reconstruct current') - self.reconCurrBtn.clicked.connect(self.sigReconstuctCurrent) - 
self.quickLoadDataBtn = BetterPushButton('Quick load data') - self.quickLoadDataBtn.clicked.connect(self.sigQuickLoadData) - self.updateBtn = BetterPushButton('Update reconstruction') - self.updateBtn.clicked.connect(self.sigUpdate) - - self.reconMultiBtn = QtWidgets.QToolButton() - self.reconMultiBtn.setSizePolicy( - QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) - ) - self.reconMultiBtn.setText('Reconstruct multidata') - self.reconMultiBtn.setPopupMode(QtWidgets.QToolButton.ToolButtonPopupMode.InstantPopup) - self.reconMultiConsolidated = QtWidgets.QAction('Consolidate into a single reconstruction') - self.reconMultiConsolidated.triggered.connect(self.sigReconstructMultiConsolidated) - self.reconMultiBtn.addAction(self.reconMultiConsolidated) - self.reconMultiIndividual = QtWidgets.QAction('Reconstruct data items individually') - self.reconMultiIndividual.triggered.connect(self.sigReconstructMultiIndividual) - self.reconMultiBtn.addAction(self.reconMultiIndividual) - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(self.quickLoadDataBtn, 0, 0, 1, 2) - layout.addWidget(self.reconCurrBtn, 1, 0) - layout.addWidget(self.reconMultiBtn, 1, 1) - layout.addWidget(self.updateBtn, 2, 0, 1, 2) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/view/MultiDataFrame.py b/imswitch/imreconstruct/view/MultiDataFrame.py deleted file mode 100644 index 206676bef..000000000 --- a/imswitch/imreconstruct/view/MultiDataFrame.py +++ /dev/null @@ -1,213 +0,0 @@ -from qtpy import QtCore, QtGui, QtWidgets - -from .guitools import BetterPushButton - - -class MultiDataFrame(QtWidgets.QFrame): - # Signals - sigAddDataClicked = QtCore.Signal() - sigLoadCurrentDataClicked = QtCore.Signal() - sigLoadAllDataClicked = QtCore.Signal() - sigUnloadCurrentDataClicked = QtCore.Signal() - sigUnloadAllDataClicked = QtCore.Signal() - sigDeleteCurrentDataClicked = QtCore.Signal() - sigDeleteAllDataClicked = QtCore.Signal() - sigSaveCurrentDataClicked = QtCore.Signal() - sigSaveAllDataClicked = QtCore.Signal() - sigSetAsCurrentDataClicked = QtCore.Signal() - sigSelectedItemChanged = QtCore.Signal() - - # Methods - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.dataList = QtWidgets.QListWidget() - self.dataList.currentItemChanged.connect(self.sigSelectedItemChanged) - self.dataList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) - - dataLoadedLabel = QtWidgets.QLabel('Data loaded') - dataLoadedLabel.setAlignment(QtCore.Qt.AlignTop) - self.dataLoadedStatus = QtWidgets.QLabel() - self.dataLoadedStatus.setAlignment(QtCore.Qt.AlignTop) - - self.setDataBtn = BetterPushButton('Set as current data') - self.setDataBtn.clicked.connect(self.sigSetAsCurrentDataClicked) - self.addDataBtn = BetterPushButton('Add data') - self.addDataBtn.clicked.connect(self.sigAddDataClicked) - self.loadCurrDataBtn = BetterPushButton('Load selected data') - self.loadCurrDataBtn.clicked.connect(self.sigLoadCurrentDataClicked) - self.loadAllDataBtn = BetterPushButton('Load all data') - self.loadAllDataBtn.clicked.connect(self.sigLoadAllDataClicked) - - self.delDataBtn = BetterPushButton('Remove') - self.delDataBtn.clicked.connect(self.sigDeleteCurrentDataClicked) - self.unloadDataBtn = BetterPushButton('Unload') - self.unloadDataBtn.clicked.connect(self.sigUnloadCurrentDataClicked) - self.delAllDataBtn = BetterPushButton('Remove all') - self.delAllDataBtn.clicked.connect(self.sigDeleteAllDataClicked) - self.unloadAllDataBtn = BetterPushButton('Unload all') - self.unloadAllDataBtn.clicked.connect(self.sigUnloadAllDataClicked) - self.saveDataBtn = BetterPushButton('Save selected data') - self.saveDataBtn.clicked.connect(self.sigSaveCurrentDataClicked) - self.saveAllDataBtn = BetterPushButton('Save all') - self.saveAllDataBtn.clicked.connect(self.sigSaveAllDataClicked) - - # Set layout - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(dataLoadedLabel, 0, 1) - layout.addWidget(self.dataLoadedStatus, 0, 2) - layout.addWidget(self.addDataBtn, 1, 1) - layout.addWidget(self.loadCurrDataBtn, 2, 1) - layout.addWidget(self.loadAllDataBtn, 3, 1) - layout.addWidget(self.setDataBtn, 4, 1) - layout.addWidget(self.delDataBtn, 1, 2) - layout.addWidget(self.unloadDataBtn, 2, 2) - layout.addWidget(self.delAllDataBtn, 3, 2) - layout.addWidget(self.unloadAllDataBtn, 4, 2) - layout.addWidget(self.saveDataBtn, 5, 1) - layout.addWidget(self.saveAllDataBtn, 5, 2) - layout.addWidget(self.unloadAllDataBtn, 4, 2) - layout.addWidget(self.dataList, 0, 0, -1, 1) - - def requestFilePathsFromUser(self, defaultFolder=None): - return QtWidgets.QFileDialog().getOpenFileNames(directory=defaultFolder)[0] - - def requestDeleteSelectedConfirmation(self): - result = QtWidgets.QMessageBox.question( - self, 'Remove 
selected?', 'Remove the selected item?', - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No - ) - return result == QtWidgets.QMessageBox.Yes - - def requestDeleteAllConfirmation(self): - result = QtWidgets.QMessageBox.question( - self, 'Remove all?', 'Remove all items?', - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No - ) - return result == QtWidgets.QMessageBox.Yes - - def requestOverwriteConfirmation(self, name): - result = QtWidgets.QMessageBox.question( - self, 'Overwrite file?', f'A file named {name} already exists. Overwrite it?', - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No - ) - return result == QtWidgets.QMessageBox.Yes - - def addDataObj(self, name, datasetName, dataObj): - listItem = QtWidgets.QListWidgetItem('') - listItem.setData(1, dataObj) - listItem.setData(3, name) - listItem.setData(4, datasetName) - listItem.setText(self.getTextForItem(listItem)) - self.dataList.addItem(listItem) - self.dataList.setCurrentItem(listItem) - - def setDataObjMemoryFlag(self, dataObj, inMemory): - for i in range(self.dataList.count()): - item = self.dataList.item(i) - if item.data(1) == dataObj: - itemText = self.getTextForItem(item) - if inMemory: - itemText += ' (MEMORY)' - item.setText(itemText) - - def getTextForItem(self, item): - name = item.data(3) - datasetName = item.data(4) - - text = f'{name}: {datasetName}' if datasetName is not None else name - - duplicateNum = item.data(5) - if duplicateNum is None: - duplicateNum = 0 - for i in range(self.dataList.count()): - otherItem = self.dataList.item(i) - if (item is not otherItem and name == otherItem.data(3) - and datasetName == otherItem.data(4) and duplicateNum <= otherItem.data(5)): - duplicateNum += 1 - item.setData(5, duplicateNum) - if duplicateNum > 0: - text = f'{name} [{duplicateNum}]: {datasetName}' if datasetName is not None else name - - return text - - def getSelectedDataObj(self): - currentItem = self.dataList.currentItem() - return self.dataList.currentItem().data(1) if currentItem is not None else None - - def getSelectedDataObjs(self): - for i in range(self.dataList.count()): - if self.dataList.item(i).isSelected(): - yield self.dataList.item(i).data(1) - - def getAllDataObjs(self): - for i in range(self.dataList.count()): - yield self.dataList.item(i).data(1) - - def delDataByDataObj(self, dataObj): - for i in reversed(range(self.dataList.count())): - if self.dataList.item(i) is not None and self.dataList.item(i).data(1) is dataObj: - self.dataList.takeItem(i) - - def setCurrentRowHighlighted(self, highlighted): - self.dataList.currentItem().setBackground( - QtGui.QColor('green' if highlighted else 'transparent') - ) - - def setAllRowsHighlighted(self, highlighted): - for i in range(self.dataList.count()): - self.dataList.item(i).setBackground( - QtGui.QColor('green' if highlighted else 'transparent') - ) - - def setLoadedStatusText(self, text): - self.dataLoadedStatus.setText(text) - - def setAddButtonEnabled(self, value): - self.addDataBtn.setEnabled(value) - - def setSetCurrentButtonEnabled(self, value): - self.setDataBtn.setEnabled(value) - - def setLoadButtonEnabled(self, value): - self.loadCurrDataBtn.setEnabled(value) - - def setLoadAllButtonEnabled(self, value): - self.loadAllDataBtn.setEnabled(value) - - def setUnloadButtonEnabled(self, value): - self.unloadDataBtn.setEnabled(value) - - def setUnloadAllButtonEnabled(self, value): - self.unloadAllDataBtn.setEnabled(value) - - def setDeleteButtonEnabled(self, value): - self.delDataBtn.setEnabled(value) - - def setDeleteAllButtonEnabled(self, 
value): - self.delAllDataBtn.setEnabled(value) - - def setSaveButtonEnabled(self, value): - self.saveDataBtn.setEnabled(value) - - def setSaveAllButtonEnabled(self, value): - self.saveAllDataBtn.setEnabled(value) - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/view/ReconstructionView.py b/imswitch/imreconstruct/view/ReconstructionView.py deleted file mode 100644 index 4e08b4276..000000000 --- a/imswitch/imreconstruct/view/ReconstructionView.py +++ /dev/null @@ -1,163 +0,0 @@ -import numpy as np -from qtpy import QtCore, QtWidgets - -from imswitch.imcommon.view.guitools import naparitools -from . import guitools - - -class ReconstructionView(QtWidgets.QFrame): - """ Frame for showing the reconstructed image""" - - # Signals - sigItemSelected = QtCore.Signal() - sigAxisStepChanged = QtCore.Signal(tuple) - sigViewChanged = QtCore.Signal() - - # Methods - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # Image Widget - naparitools.addNapariGrayclipColormap() - self.napariViewer = naparitools.EmbeddedNapari() - self.napariViewer.dims.events.connect(self.dimsChanged) - naparitools.NapariUpdateLevelsWidget.addToViewer(self.napariViewer) - - self.imgLayer = self.napariViewer.add_image( - np.zeros((1, 1)), rgb=False, name='Reconstruction', colormap='grayclip', protected=True - ) - - # Button group for choosing view - self.chooseViewGroup = QtWidgets.QButtonGroup() - self.chooseViewBox = QtWidgets.QGroupBox('Choose view') - self.viewLayout = QtWidgets.QVBoxLayout() - - self.standardView = QtWidgets.QRadioButton('Standard view') - self.standardView.viewName = 'standard' - self.chooseViewGroup.addButton(self.standardView) - self.viewLayout.addWidget(self.standardView) - - self.bottomView = QtWidgets.QRadioButton('Bottom side view') - self.bottomView.viewName = 'bottom' - self.chooseViewGroup.addButton(self.bottomView) - self.viewLayout.addWidget(self.bottomView) - - self.leftView = QtWidgets.QRadioButton('Left side view') - self.leftView.viewName = 'left' - self.chooseViewGroup.addButton(self.leftView) - self.viewLayout.addWidget(self.leftView) - - self.chooseViewBox.setLayout(self.viewLayout) - self.chooseViewGroup.buttonClicked.connect(self.sigViewChanged) - - # List for storing sevral data sets - self.reconList = QtWidgets.QListWidget() - self.reconList.currentItemChanged.connect(self.sigItemSelected) - self.reconList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) - removeReconBtn = guitools.BetterPushButton('Remove current') - removeReconBtn.clicked.connect(self.removeRecon) - removeAllReconBtn = guitools.BetterPushButton('Remove all') - removeAllReconBtn.clicked.connect(self.removeAllRecon) - - # Set initial states - self.standardView.setChecked(True) - - # Set layout - layout = QtWidgets.QGridLayout() - - self.setLayout(layout) - - layout.addWidget(self.napariViewer.get_widget(), 
0, 0, 4, 1) - layout.addWidget(self.chooseViewBox, 0, 1, 1, 2) - layout.addWidget(self.reconList, 0, 3, 2, 1) - layout.addWidget(removeReconBtn, 2, 3) - layout.addWidget(removeAllReconBtn, 3, 3) - - layout.setRowStretch(1, 1) - layout.setColumnStretch(0, 100) - layout.setColumnStretch(2, 5) - - def dimsChanged(self, event): - if event.type == 'current_step': - self.sigAxisStepChanged.emit(event.value) - - def addNewData(self, reconObj, name): - ind = 0 - for i in range(self.reconList.count()): - if name + '.' + str(ind) == self.reconList.item(i).data(0): - ind += 1 - name = name + '.' + str(ind) - - listItem = QtWidgets.QListWidgetItem(name) - listItem.setData(1, reconObj) - self.reconList.addItem(listItem) - self.reconList.setCurrentItem(listItem) - - def getCurrentItemIndex(self): - return self.reconList.indexFromItem(self.reconList.currentItem()).row() - - def getDataAtIndex(self, index): - return self.reconList.item(index).data(1) - - def getCurrentItemData(self): - currentItem = self.reconList.currentItem() - return currentItem.data(1) if currentItem is not None else None - - def getAllItemDatas(self): - for i in range(self.reconList.count()): - item = self.reconList.item(i) - yield item.text(), item.data(1) - - def getViewName(self): - return self.chooseViewGroup.checkedButton().viewName - - def getImage(self): - return self.imgLayer.data - - def setImage(self, im, axisLabels): - self.imgLayer.data = im - self.napariViewer.dims.axis_labels = tuple(axisLabels) - - def clearImage(self): - self.imgLayer.data = np.zeros((1, 1)) - - def getImageDisplayLevels(self): - return self.imgLayer.contrast_limits - - def setImageDisplayLevels(self, minimum, maximum): - self.imgLayer.contrast_limits = (minimum, maximum) - - def setImageDisplayLevelsRange(self, minimum, maximum): - self.imgLayer.contrast_limits_range = (minimum, maximum) - - def removeRecon(self): - numSelected = len(self.reconList.selectedIndexes()) - while not numSelected == 0: - row = self.reconList.selectedIndexes()[0].row() - self.reconList.takeItem(row) - numSelected -= 1 - - def removeAllRecon(self): - for i in range(self.reconList.count()): - currRow = self.reconList.currentRow() - self.reconList.takeItem(currRow) - - def resetView(self): - self.napariViewer.reset_view() - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
diff --git a/imswitch/imreconstruct/view/ScanParamsDialog.py b/imswitch/imreconstruct/view/ScanParamsDialog.py deleted file mode 100644 index 64129359f..000000000 --- a/imswitch/imreconstruct/view/ScanParamsDialog.py +++ /dev/null @@ -1,196 +0,0 @@ -from qtpy import QtCore, QtWidgets - -from .guitools import BetterPushButton - - -class ScanParamsDialog(QtWidgets.QDialog): - """Seperate window for editing scanning parameters""" - - # Signals - sigApplyParams = QtCore.Signal() - - # Methods - def __init__(self, parent, r_l_text, u_d_text, b_f_text, - timepoints_text, p_text, n_text, *args, **kwargs): - super().__init__(parent, *args, **kwargs) - - self.r_l_text = r_l_text - self.u_d_text = u_d_text - self.b_f_text = b_f_text - self.p_text = p_text - self.timepoints_text = timepoints_text - self.n_text = n_text - - imDimLabel = QtWidgets.QLabel('Image dimension') - dimDirLabel = QtWidgets.QLabel('Direction') - imStepsLabel = QtWidgets.QLabel('Steps') - imStepSizeLabel = QtWidgets.QLabel('Step size (nm)') - - dim0Label = QtWidgets.QLabel('Dimension 0') - self.dim0DimEdit = QtWidgets.QComboBox() - self.dim0DimEdit.addItems([self.r_l_text, self.u_d_text, self.b_f_text]) - self.dim0DimEdit.currentIndexChanged.connect(self.dim0Changed) - self.dim0DirEdit = QtWidgets.QComboBox() - self.dim0DirEdit.addItems([self.p_text, self.n_text]) - self.dim0SizeEdit = QtWidgets.QLineEdit() - self.dim0StepSizeEdit = QtWidgets.QLineEdit() - - dim1Label = QtWidgets.QLabel('Dimension 1') - self.dim1DimEdit = QtWidgets.QComboBox() - self.dim1DimEdit.currentIndexChanged.connect(self.dim1Changed) - self.dim1DirEdit = QtWidgets.QComboBox() - self.dim1DirEdit.addItems([self.p_text, self.n_text]) - self.dim1SizeEdit = QtWidgets.QLineEdit() - self.dim1StepSizeEdit = QtWidgets.QLineEdit() - - dim2Label = QtWidgets.QLabel('Dimension 2') - self.dim2DimEdit = QtWidgets.QComboBox() - self.dim2DirEdit = QtWidgets.QComboBox() - self.dim2DirEdit.addItems([self.p_text, self.n_text]) - self.dim2SizeEdit = QtWidgets.QLineEdit() - self.dim2StepSizeEdit = QtWidgets.QLineEdit() - - dim3Label = QtWidgets.QLabel('Dimension 3') - self.dim3DimLabel = QtWidgets.QLabel(self.timepoints_text) - self.dim3SizeEdit = QtWidgets.QLineEdit() - self.dim3StepSizeEdit = QtWidgets.QLineEdit() - - self.unidirCheck = QtWidgets.QCheckBox('Unidirectional scan') - - okBtn = BetterPushButton('OK') - okBtn.clicked.connect(self.okClicked) - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(imDimLabel, 0, 1) - layout.addWidget(dimDirLabel, 0, 2) - layout.addWidget(imStepsLabel, 0, 3) - layout.addWidget(imStepSizeLabel, 0, 4) - layout.addWidget(dim0Label, 1, 0) - layout.addWidget(self.dim0DimEdit, 1, 1) - layout.addWidget(self.dim0DirEdit, 1, 2) - layout.addWidget(self.dim0SizeEdit, 1, 3) - layout.addWidget(self.dim0StepSizeEdit, 1, 4) - layout.addWidget(dim1Label, 2, 0) - layout.addWidget(self.dim1DimEdit, 2, 1) - layout.addWidget(self.dim1DirEdit, 2, 2) - layout.addWidget(self.dim1SizeEdit, 2, 3) - layout.addWidget(self.dim1StepSizeEdit, 2, 4) - layout.addWidget(dim2Label, 3, 0) - layout.addWidget(self.dim2DimEdit, 3, 1) - layout.addWidget(self.dim2DirEdit, 3, 2) - layout.addWidget(self.dim2SizeEdit, 3, 3) - layout.addWidget(self.dim2StepSizeEdit, 3, 4) - layout.addWidget(dim3Label, 4, 0) - layout.addWidget(self.dim3DimLabel, 4, 1) - layout.addWidget(self.dim3SizeEdit, 4, 3) - layout.addWidget(self.dim3StepSizeEdit, 4, 4) - layout.addWidget(self.unidirCheck, 5, 1) - layout.addWidget(okBtn, 5, 2) - - def updateValues(self, 
parDict): - try: - self.dim0DimEdit.setCurrentIndex(self.dim0DimEdit.findText(parDict['dimensions'][0])) - self.dim1DimEdit.setCurrentIndex(self.dim1DimEdit.findText(parDict['dimensions'][1])) - self.dim2DimEdit.setCurrentIndex(self.dim2DimEdit.findText(parDict['dimensions'][2])) - self.dim0Changed() - - self.dim0DirEdit.setCurrentIndex(self.dim0DirEdit.findText(parDict['directions'][0])) - self.dim1DirEdit.setCurrentIndex(self.dim1DirEdit.findText(parDict['directions'][1])) - self.dim2DirEdit.setCurrentIndex(self.dim2DirEdit.findText(parDict['directions'][2])) - - self.dim0SizeEdit.setText(parDict['steps'][0]) - self.dim1SizeEdit.setText(parDict['steps'][1]) - self.dim2SizeEdit.setText(parDict['steps'][2]) - self.dim3SizeEdit.setText(parDict['steps'][3]) - - self.dim0StepSizeEdit.setText(parDict['step_sizes'][0]) - self.dim1StepSizeEdit.setText(parDict['step_sizes'][1]) - self.dim2StepSizeEdit.setText(parDict['step_sizes'][2]) - self.dim3StepSizeEdit.setText(parDict['step_sizes'][3]) - - self.unidirCheck.setChecked(parDict['unidirectional']) - except Exception as e: - self.dim0Changed() - raise e - - def dim0Changed(self): - currText = self.dim0DimEdit.currentText() - self.dim1DimEdit.clear() - if currText == self.r_l_text: - self.dim1DimEdit.addItems([self.u_d_text, self.b_f_text]) - elif currText == self.u_d_text: - self.dim1DimEdit.addItems([self.r_l_text, self.b_f_text]) - else: - self.dim1DimEdit.addItems([self.r_l_text, self.u_d_text]) - - self.dim1Changed() - - def dim1Changed(self): - currdim0Text = self.dim0DimEdit.currentText() - currdim1Text = self.dim1DimEdit.currentText() - self.dim2DimEdit.clear() - if currdim0Text == self.r_l_text: - if currdim1Text == self.u_d_text: - self.dim2DimEdit.addItem(self.b_f_text) - else: - self.dim2DimEdit.addItem(self.u_d_text) - elif currdim0Text == self.u_d_text: - if currdim1Text == self.r_l_text: - self.dim2DimEdit.addItem(self.b_f_text) - else: - self.dim2DimEdit.addItem(self.r_l_text) - else: - if currdim1Text == self.r_l_text: - self.dim2DimEdit.addItem(self.u_d_text) - else: - self.dim2DimEdit.addItem(self.r_l_text) - - def getDimensions(self): - return [self.dim0DimEdit.currentText(), - self.dim1DimEdit.currentText(), - self.dim2DimEdit.currentText(), - self.dim3DimLabel.text()] - - def getDirections(self): - return [self.dim0DirEdit.currentText(), - self.dim1DirEdit.currentText(), - self.dim2DirEdit.currentText(), - self.p_text] - - def getSteps(self): - return [self.dim0SizeEdit.text(), - self.dim1SizeEdit.text(), - self.dim2SizeEdit.text(), - self.dim3SizeEdit.text()] - - def getStepSizes(self): - return [self.dim0StepSizeEdit.text(), - self.dim1StepSizeEdit.text(), - self.dim2StepSizeEdit.text(), - self.dim3StepSizeEdit.text()] - - def getUnidirectional(self): - return self.unidirCheck.isChecked() - - def okClicked(self): - self.sigApplyParams.emit() - self.close() - - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see . diff --git a/imswitch/imreconstruct/view/WatcherFrame.py b/imswitch/imreconstruct/view/WatcherFrame.py deleted file mode 100644 index 53560f9ca..000000000 --- a/imswitch/imreconstruct/view/WatcherFrame.py +++ /dev/null @@ -1,67 +0,0 @@ -from qtpy import QtCore, QtWidgets - -from imswitch.imcontrol.view import guitools -import os - - -class WatcherFrame(QtWidgets.QFrame): - """Frame for reconstructing files from a folder automatically.""" - - sigWatchChanged = QtCore.Signal(bool) # (enabled) - sigChangeFolder = QtCore.Signal() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.path = '' - self.folderEdit = QtWidgets.QLineEdit(self.path) - - self.browseFolderButton = guitools.BetterPushButton('Browse') - self.watchCheck = QtWidgets.QCheckBox('Watch and run') - - self.listWidget = QtWidgets.QListWidget() - #self.updateFileList() - - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - - layout.addWidget(self.folderEdit, 0, 1) - layout.addWidget(self.browseFolderButton, 0, 0) - layout.addWidget(self.listWidget, 1, 0, 1, 2) - layout.addWidget(self.watchCheck, 2, 0) - - self.watchCheck.toggled.connect(self.sigWatchChanged) - self.browseFolderButton.clicked.connect(self.browse) - - def updateFileList(self, extension): - self.path = self.folderEdit.text() - res = [] - for file in os.listdir(self.path): - if file.endswith('.'+extension): - res.append(file) - - self.listWidget.clear() - self.listWidget.addItems(res) - - def browse(self): - path = guitools.askForFolderPath(self, defaultFolder=self.path) - if path: - self.path = path - self.folderEdit.setText(self.path) - self.sigChangeFolder.emit() - -# Copyright (C) 2020-2024 ImSwitch developers -# This file is part of ImSwitch. -# -# ImSwitch is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# ImSwitch is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . diff --git a/imswitch/imreconstruct/view/__init__.py b/imswitch/imreconstruct/view/__init__.py deleted file mode 100644 index d5f13ab28..000000000 --- a/imswitch/imreconstruct/view/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ImRecMainView import ImRecMainView diff --git a/imswitch/imreconstruct/view/guitools/__init__.py b/imswitch/imreconstruct/view/guitools/__init__.py deleted file mode 100644 index f8d15ca1c..000000000 --- a/imswitch/imreconstruct/view/guitools/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from imswitch.imcommon.view.guitools import * # noqa diff --git a/requirements.txt b/requirements.txt index 676cba339..2ebdc8838 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,6 +22,7 @@ scikit-image >= 0.19.2 Send2Trash >= 1.8 tifffile >= 2020.11.26 ome_zarr >= 0.6.1 +ome-types >= 0.5.0 Pyro5 >= 5.14 fastAPI >= 0.86.0 uvicorn[standard] >= 0.19.0
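
The `ome-types >= 0.5.0` entry added to requirements.txt provides pydantic models for the OME data model, which the metadata hub uses when exporting standards-compliant metadata. Below is a minimal, illustrative sketch of what the dependency offers on its own — the detector name, sizes, and channel values are example placeholders, not code from this changeset:

```python
# Minimal sketch of the ome-types API (illustrative values only).
from ome_types import to_xml
from ome_types.model import OME, Image, Pixels, Channel

# Describe one 2D frame from a hypothetical 'Camera1' detector.
pixels = Pixels(
    dimension_order="XYZCT",
    size_x=2048, size_y=2048, size_z=1, size_c=1, size_t=1,
    type="uint16",            # pixel data type
    physical_size_x=6.5,      # physical pixel size (um)
    physical_size_y=6.5,
    channels=[Channel(name="GFP", emission_wavelength=510.0)],
)

ome = OME(images=[Image(name="Camera1", pixels=pixels)])

# Serialize to an OME-XML string, e.g. for embedding in OME-TIFF/OME-Zarr output.
print(to_xml(ome))
```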