From a92458a62987f64334bfa255a4f278ee791d0638 Mon Sep 17 00:00:00 2001 From: Harrison-Oatman <46121828+Harrison-Oatman@users.noreply.github.com> Date: Thu, 8 Jan 2026 01:42:29 -0500 Subject: [PATCH 1/3] New pattern model specification --- src/pyclm/core/patterns/bar_patterns.py | 60 +++++--------- .../patterns/feedback_control_patterns.py | 69 ++++++++-------- src/pyclm/core/patterns/ktr_patterns.py | 52 +++++------- src/pyclm/core/patterns/pattern.py | 80 +++++++++++++++++-- src/pyclm/core/patterns/pattern_process.py | 18 +++-- src/pyclm/core/patterns/static_patterns.py | 26 ++---- src/pyclm/core/patterns/wave_patterns.py | 27 ++----- 7 files changed, 172 insertions(+), 160 deletions(-) diff --git a/src/pyclm/core/patterns/bar_patterns.py b/src/pyclm/core/patterns/bar_patterns.py index ff2abf5..7956912 100644 --- a/src/pyclm/core/patterns/bar_patterns.py +++ b/src/pyclm/core/patterns/bar_patterns.py @@ -16,7 +16,6 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) print(f"Initializing {self.__class__.__name__}") @@ -28,26 +27,21 @@ class StationaryBarPattern(BarPatternBase): name = "bar (stationary)" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, bar_speed=0, period=30, **kwargs): + def __init__(self, duty_cycle=0.2, bar_speed=0, period=30, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of vertical axis containing "on" pixels :param bar_speed: speed in um/min :param period: period in um """ - super().__init__(experiment_name, camera_properties) + super().__init__(**kwargs) self.duty_cycle = duty_cycle self.bar_speed = 0 self.period_space = period # in um self.period_time = 0 # in minutes - def initialize(self, experiment): - super().initialize(experiment) - - return [] - - def generate(self, data_dock: DataDock): + def generate(self, context): xx, yy = self.get_meshgrid() @@ -63,41 +57,34 @@ class BarPattern(BarPatternBase): name = "bar" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, bar_speed=1, period=30, **kwargs): + def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of vertical axis containing "on" pixels :param bar_speed: speed in um/min :param period: period in um """ - super().__init__(experiment_name, camera_properties) + super().__init__(**kwargs) self.duty_cycle = duty_cycle self.bar_speed = bar_speed self.period_space = period # in um self.period_time = period / bar_speed # in minutes - def initialize(self, experiment): - super().initialize(experiment) - - return [] + def _get_pattern_at_time(self, t_minutes): + xx, yy = self.get_meshgrid() + is_on = ((t_minutes - (yy / self.bar_speed)) % self.period_time) < self.duty_cycle*self.period_time + return is_on.astype(np.float16) - def generate(self, data_dock: DataDock): - - t = data_dock.time_seconds / 60 - - xx, yy = self.get_meshgrid() - - is_on = ((t - (yy / self.bar_speed)) % self.period_time) < self.duty_cycle*self.period_time - - return is_on.astype(np.float16) + def generate(self, context): + return self._get_pattern_at_time(context.time / 60) class SawToothMethod(PatternMethod): name = "sawtooth" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, + def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, inverse=False, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently 
fraction of @@ -105,7 +92,7 @@ def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, :param bar_speed: speed in um/min :param period: period in um """ - super().__init__(experiment_name, camera_properties) + super().__init__(**kwargs) self.duty_cycle = duty_cycle self.bar_speed = bar_speed @@ -113,13 +100,8 @@ def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, self.period_time = period / bar_speed # in minutes self.inverse = inverse - def initialize(self, experiment): - super().initialize(experiment) - - return [] - - def generate(self, data_dock: DataDock): - t = data_dock.time_seconds / 60 + def generate(self, context): + t = context.time / 60 xx, yy = self.get_meshgrid() @@ -141,7 +123,7 @@ class BouncingBarPattern(BarPattern): name = "bar_bounce" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, + def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, t_loop=60, **kwargs): """ @@ -151,11 +133,11 @@ def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, :param period: period in um :param t_loop: period of reversal (there and back) in minutes """ - super().__init__(experiment_name, camera_properties, duty_cycle, bar_speed, period, **kwargs) + super().__init__(duty_cycle=duty_cycle, bar_speed=bar_speed, period=period, **kwargs) self.t_loop_s = t_loop * 60 - def generate(self, data_dock: DataDock): - t = data_dock.time_seconds + def generate(self, context): + t = context.time t = t % self.t_loop_s halfway = self.t_loop_s / 2 @@ -163,6 +145,4 @@ def generate(self, data_dock: DataDock): if t > halfway: t = halfway - (t - halfway) - data_dock.time_seconds = t - - return super().generate(data_dock) + return self._get_pattern_at_time(t / 60) diff --git a/src/pyclm/core/patterns/feedback_control_patterns.py b/src/pyclm/core/patterns/feedback_control_patterns.py index 87d6860..991f84c 100644 --- a/src/pyclm/core/patterns/feedback_control_patterns.py +++ b/src/pyclm/core/patterns/feedback_control_patterns.py @@ -9,34 +9,31 @@ import tifffile class PerCellPatternMethod(PatternMethod): + """ + Base class for the per-cell pattern methods. This class provides the basic structure for generating patterns + based on the properties of segmented cells. Subclasses should implement the `process_prop` method to define + how each cell's properties are used to generate the pattern, and include any additional parameters needed for the + specific pattern in their `__init__` method. + """ name = "per_cell_base" - def __init__(self, experiment_name, camera_properties, channel=None, voronoi=False, gradient=False, direction = 1, **kwargs): - super().__init__(experiment_name, camera_properties, **kwargs) + def __init__(self, channel=None, voronoi=False, gradient=False, direction = 1, **kwargs): + super().__init__(**kwargs) if channel is None: - raise AttributeError(f"{experiment_name}: PerCellPatternModels must be provided a " - f"segmentation channel in the .toml: e.g. channel = \"638\"") - - self.channel = channel - self.seg_channel_id = None + raise AttributeError("PerCellPatternModels must be provided a " + "segmentation channel in the .toml: e.g. 
channel = \"638\"") self.gradient = gradient self.voronoi = voronoi self.direction = direction - def initialize(self, experiment): - super().initialize(experiment) - - channel = experiment.channels.get(self.channel, None) - - assert channel is not None, f"provided channel {self.channel} is not in experiment" - - self.seg_channel_id = channel.channel_id - cell_seg_air = AcquiredImageRequest(channel.channel_id, False, True) + self.channel = channel - return [cell_seg_air] + # request the segmentation of the provided channel, to be used by self.generate. + # This will ensure that the data is available when generate is called. + self.add_requirement(channel_name=channel, seg=True) def prop_vector(self, prop, vec): """ @@ -86,9 +83,9 @@ def voronoi_rebuild(self, img): return out - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: - seg: np.ndarray = data_dock.data[self.seg_channel_id]["seg"].data + seg = context.segmentation(self.channel) if self.voronoi: px_dis = distance_transform_edt(seg == 0) @@ -114,9 +111,9 @@ class RotateCcwModel(PerCellPatternMethod): name = "rotate_ccw" - def __init__(self, experiment_name, camera_properties, channel=None, **kwargs): + def __init__(self, channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: @@ -132,9 +129,9 @@ class MoveOutModel(PerCellPatternMethod): name = "move_out" - def __init__(self, experiment_name, camera_properties, channel=None, **kwargs): + def __init__(self, channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: @@ -150,9 +147,9 @@ class MoveInModel(PerCellPatternMethod): name = "move_in" - def __init__(self, experiment_name, camera_properties, channel=None, **kwargs): + def __init__(self, channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: @@ -168,9 +165,9 @@ class MoveDownModel(PerCellPatternMethod): name = "move_down" - def __init__(self, experiment_name, camera_properties, channel=None, **kwargs): + def __init__(self, channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: @@ -186,12 +183,12 @@ class BounceModel(PerCellPatternMethod): name = "fb_bounce" - def __init__(self, experiment_name, camera_properties, channel=None, t_loop=60, **kwargs): + def __init__(self, channel=None, t_loop=60, **kwargs): self.t_loop_s = t_loop * 60 self.down = True - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: @@ -202,9 +199,9 @@ def process_prop(self, prop) -> np.ndarray: return self.prop_vector(prop, vec) - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: - t = data_dock.time_seconds + t = context.time t = t % self.t_loop_s halfway = self.t_loop_s / 2 @@ -212,20 +209,20 @@ def generate(self, data_dock: DataDock) -> np.ndarray: if t > halfway: self.down = False - return super().generate(data_dock) + return super().generate(context) class DensityModel(PerCellPatternMethod): name = "density_gradient" - def 
__init__(self, experiment_name, camera_properties, channel=None, **kwargs): + def __init__(self, channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, channel, **kwargs) + super().__init__(channel=channel, **kwargs) - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: - seg: np.ndarray = data_dock.data[self.seg_channel_id]["seg"].data + seg = context.segmentation(self.channel) if self.voronoi: px_dis = distance_transform_edt(seg == 0) diff --git a/src/pyclm/core/patterns/ktr_patterns.py b/src/pyclm/core/patterns/ktr_patterns.py index 231c714..adbf359 100644 --- a/src/pyclm/core/patterns/ktr_patterns.py +++ b/src/pyclm/core/patterns/ktr_patterns.py @@ -9,35 +9,23 @@ class NucleusControlMethod(PatternMethod): name = "nucleus_control_base" - def __init__(self, experiment_name, camera_properties, nuc_channel=None, **kwargs): - super().__init__(experiment_name, camera_properties, **kwargs) + def __init__(self, channel=None, **kwargs): + super().__init__(**kwargs) - if nuc_channel is None: - raise AttributeError(f"{experiment_name}: PerCellPatternModels must be provided a " - f"segmentation channel in the .toml: e.g. nuc_channel = \"545\"") + if channel is None: + raise AttributeError("NucleusControlMethod must be provided a " + "segmentation channel in the .toml: e.g. nuc_channel = \"545\"") - self.nuc_channel = nuc_channel - self.seg_channel_id = None - - def initialize(self, experiment): - super().initialize(experiment) - - channel = experiment.channels.get(self.nuc_channel, None) - - assert channel is not None, f"provided channel {self.nuc_channel} is not in experiment" - - self.seg_channel_id = channel.channel_id - cell_seg_air = AcquiredImageRequest(channel.channel_id, True, True) - - return [cell_seg_air] + self.channel = channel + self.add_requirement(channel, raw=True, seg=True) def process_prop(self, prop): return prop.image - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: - seg: np.ndarray = data_dock.data[self.seg_channel_id]["seg"].data - raw: np.ndarray = data_dock.data[self.seg_channel_id]["raw"].data + seg = context.segmentation(self.channel) + raw = context.raw(self.channel) h, w = self.pattern_shape @@ -57,9 +45,9 @@ class BinaryNucleusClampModel(NucleusControlMethod): name = "binary_nucleus_clamp" - def __init__(self, experiment_name, camera_properties, nuc_channel, clamp_target, **kwargs): + def __init__(self, channel, clamp_target, **kwargs): - super().__init__(experiment_name, camera_properties, nuc_channel, **kwargs) + super().__init__(channel=channel, **kwargs) self.clamp_target = clamp_target @@ -74,10 +62,10 @@ class CenteredImageModel(NucleusControlMethod): name = "centered_image" - def __init__(self, experiment_name, camera_properties, nuc_channel="545", tif_path=None, + def __init__(self, channel="545", tif_path=None, min_intensity=2000, max_intensity=5000, **kwargs): - super().__init__(experiment_name, camera_properties, nuc_channel, **kwargs) + super().__init__(channel=channel, **kwargs) self.img = tifffile.imread(tif_path) self.min_intensity = min_intensity @@ -135,27 +123,27 @@ def process_prop(self, prop): return prop.image - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: if self.target_image is None: self.make_target_image() - return super().generate(data_dock) + return super().generate(context) class GlobalCycleModel(NucleusControlMethod): name = "global_cycle" - def __init__(self, 
experiment_name, camera_properties, nuc_channel, period_m=10, **kwargs): + def __init__(self, channel, period_m=10, **kwargs): - super().__init__(experiment_name, camera_properties, nuc_channel, **kwargs) + super().__init__(channel=channel, **kwargs) self.period_s = period_m * 60 - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, context) -> np.ndarray: - t = data_dock.time_seconds + t = context.time is_on = ((t // self.period_s) % 2) == 0 diff --git a/src/pyclm/core/patterns/pattern.py b/src/pyclm/core/patterns/pattern.py index 8bf586b..76628c0 100644 --- a/src/pyclm/core/patterns/pattern.py +++ b/src/pyclm/core/patterns/pattern.py @@ -1,7 +1,7 @@ from ..experiments import Experiment from ..datatypes import AcquisitionData, SegmentationData import logging -from typing import NamedTuple, Union +from typing import NamedTuple, Union, Any from uuid import uuid4, UUID from collections import defaultdict from pathlib import Path @@ -75,24 +75,85 @@ def check_complete(self): return len(self.get_awaiting()) == 0 +class PatternContext: + def __init__(self, data_dock: DataDock, experiment: Experiment): + self._dock = data_dock + self._experiment = experiment + self._channel_map = {name: ch.channel_id for name, ch in experiment.channels.items()} + + @property + def time(self) -> float: + return self._dock.time_seconds + + def _get_channel_id(self, channel_name: str) -> UUID: + if channel_name not in self._channel_map: + raise ValueError(f"Channel '{channel_name}' not found in experiment.") + return self._channel_map[channel_name] + + def raw(self, channel_name: str) -> np.ndarray: + """Get raw image for a channel.""" + cid = self._get_channel_id(channel_name) + if cid not in self._dock.data or "raw" not in self._dock.data[cid]: + raise ValueError(f"Raw data for channel '{channel_name}' was not requested.") + data = self._dock.data[cid]["raw"] + return data.data if data else None + + def segmentation(self, channel_name: str) -> np.ndarray: + """Get segmentation mask for a channel.""" + cid = self._get_channel_id(channel_name) + if cid not in self._dock.data or "seg" not in self._dock.data[cid]: + raise ValueError(f"Segmentation data for channel '{channel_name}' was not requested.") + data = self._dock.data[cid]["seg"] + return data.data if data else None + + + class PatternMethod: name = "base" - def __init__(self, experiment_name, camera_properties: CameraProperties, **kwargs): - self.experiment = experiment_name + def __init__(self, experiment_name=None, camera_properties: CameraProperties=None, **kwargs): + # Support legacy init where these are passed + self.experiment_name = experiment_name + self.camera_properties = camera_properties + if camera_properties: + self.pixel_size_um = camera_properties.pixel_size_um + self.pattern_shape = (camera_properties.roi.height, camera_properties.roi.width) + else: + self.pixel_size_um = 1.0 + self.pattern_shape = (100, 100) # Default placeholders + + self.binning = 1 + self._requirements_list = [] # List of (channel_name, raw, seg) + self._experiment_ref = None + + def configure_system(self, experiment_name: str, camera_properties: CameraProperties, experiment: Experiment): + """Called by the system to inject dependencies.""" + self.experiment_name = experiment_name self.camera_properties = camera_properties self.pixel_size_um = camera_properties.pixel_size_um self.pattern_shape = (camera_properties.roi.height, camera_properties.roi.width) - self.binning = 1 + self._experiment_ref = experiment + + def add_requirement(self, 
channel_name: str, raw: bool = False, seg: bool = False): + """Declarative way to add requirements.""" + self._requirements_list.append((channel_name, raw, seg)) def initialize(self, experiment: Experiment) -> list[AcquiredImageRequest]: + + # If user used add_requirement, process them + reqs = [] + for name, needs_raw, needs_seg in self._requirements_list: + ch = experiment.channels.get(name) + if ch: + reqs.append(AcquiredImageRequest(ch.channel_id, needs_raw, needs_seg)) + else: + logger.warning(f"Pattern {self.name} requested unknown channel {name}") binning = experiment.stimulation.binning - self.update_binning(binning) - - return [] + + return reqs def get_meshgrid(self) -> tuple[np.ndarray, np.ndarray]: h, w = self.pattern_shape @@ -104,7 +165,10 @@ def get_meshgrid(self) -> tuple[np.ndarray, np.ndarray]: return xx, yy - def generate(self, data_dock: DataDock) -> np.ndarray: + def generate(self, data_dock: Union[DataDock, PatternContext]) -> np.ndarray: + # If passed PatternContext, user is using new API. + # But if user implemented old generate(self, data_dock: DataDock), we need to support that. + # This method is called by the system. raise NotImplementedError def update_binning(self, binning: int): diff --git a/src/pyclm/core/patterns/pattern_process.py b/src/pyclm/core/patterns/pattern_process.py index 4570558..585fab8 100644 --- a/src/pyclm/core/patterns/pattern_process.py +++ b/src/pyclm/core/patterns/pattern_process.py @@ -6,7 +6,7 @@ from ..datatypes import CameraPattern, AcquisitionData, SegmentationData from ..experiments import Experiment from ..messages import Message -from .pattern import PatternReview, PatternMethod, PatternMethodReturnsSLM, DataDock, AcquiredImageRequest, CameraProperties +from .pattern import PatternReview, PatternMethod, PatternMethodReturnsSLM, DataDock, AcquiredImageRequest, CameraProperties, PatternContext from .bar_patterns import BouncingBarPattern, BarPatternBase, SawToothMethod from .static_patterns import CirclePattern, FullOnPattern from .feedback_control_patterns import RotateCcwModel, MoveInModel, MoveDownModel, MoveOutModel, BounceModel @@ -97,15 +97,21 @@ def run_model(self, experiment_name, dockname): assert isinstance(model, PatternMethod), f"self.models[{'experiment_name'}] is not a PatternMethod" - if isinstance(model, PatternMethodReturnsSLM): - slm_pattern = model.generate(data_dock) + # Create context wrapper + + # We assume _experiment_ref is available. If not, we might crash, which is acceptable for alpha breakage. + # Ideally, we ensure configure_system is called. 
+ if model._experiment_ref is None: + raise RuntimeError(f"Model {model.name} for {experiment_name} was not properly configured with an experiment reference.") + + context = PatternContext(data_dock, model._experiment_ref) + if isinstance(model, PatternMethodReturnsSLM): + slm_pattern = model.generate(context) self.slm.put(CameraPattern(experiment_name, slm_pattern, slm_coords=True)) else: - - pattern = model.generate(data_dock) - + pattern = model.generate(context) self.slm.put(CameraPattern(experiment_name, pattern, slm_coords=False, binning=model.binning)) def dock_string(self, experiment_name, t): diff --git a/src/pyclm/core/patterns/static_patterns.py b/src/pyclm/core/patterns/static_patterns.py index c62283e..4d1bb11 100644 --- a/src/pyclm/core/patterns/static_patterns.py +++ b/src/pyclm/core/patterns/static_patterns.py @@ -1,6 +1,6 @@ import numpy as np -from .pattern import PatternMethod, DataDock +from .pattern import PatternMethod class CirclePattern(PatternMethod): @@ -10,18 +10,12 @@ class CirclePattern(PatternMethod): name = "circle" - def __init__(self, experiment_name, camera_properties, rad=1, **kwargs): - super().__init__(experiment_name, camera_properties) + def __init__(self, rad=1, **kwargs): + super().__init__(**kwargs) self.rad = rad - def initialize(self, experiment): - - super().initialize(experiment) - - return [] - - def generate(self, data_dock: DataDock): + def generate(self, context): h, w = self.pattern_shape @@ -32,8 +26,6 @@ def generate(self, data_dock: DataDock): print(h, w) - # pattern = np.ones((int(h), int(w)), dtype=np.float16) - return (((xx - center_x)**2 + (yy - center_y)**2) < (self.rad**2)).astype(np.float16) @@ -44,14 +36,10 @@ class FullOnPattern(PatternMethod): name = "full_on" - def __init__(self, experiment_name, camera_properties, **kwargs): - super().__init__(experiment_name, camera_properties) - - def initialize(self, experiment): - super().initialize(experiment) - return [] + def __init__(self, **kwargs): + super().__init__(**kwargs) - def generate(self, data_dock: DataDock): + def generate(self, context): h, w = self.pattern_shape pattern = np.ones((int(h), int(w)), dtype=np.float16) diff --git a/src/pyclm/core/patterns/wave_patterns.py b/src/pyclm/core/patterns/wave_patterns.py index 907af88..d96dccf 100644 --- a/src/pyclm/core/patterns/wave_patterns.py +++ b/src/pyclm/core/patterns/wave_patterns.py @@ -1,6 +1,6 @@ import numpy as np -from .pattern import DataDock, PatternMethod +from .pattern import PatternMethod class WavePatternBase(PatternMethod): @@ -16,7 +16,6 @@ def __new__(cls, *args, **kwargs): return super().__new__(cls) def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) print(f"Initializing {self.__class__.__name__}") @@ -27,26 +26,21 @@ class StationaryWavePattern(WavePatternBase): name = "wave (stationary)" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, wave_speed=0, period=30, **kwargs): + def __init__(self, duty_cycle=0.2, wave_speed=0, period=30, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of vertical axis containing "on" pixels :param wave_speed: speed in um/min :param period: period in um """ - super().__init__(experiment_name, camera_properties) + super().__init__(**kwargs) self.duty_cycle = duty_cycle self.wave_speed = 0 self.period_space = period # in um self.period_time = 0 # in minutes - def initialize(self, experiment): - super().initialize(experiment) - - return [] - - def generate(self, data_dock: DataDock): + 
def generate(self, context): xx, yy = self.get_meshgrid() center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 @@ -64,7 +58,7 @@ class WavePattern(WavePatternBase): name = "wave" - def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, wave_speed=1, period=30, direction = 1, **kwargs): + def __init__(self, duty_cycle=0.2, wave_speed=1, period=30, direction = 1, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of radial axis containing "on" pixels @@ -72,7 +66,7 @@ def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, wave_spee :param period: period in um :param direction: movement in/out relative to the center. 1 is out; -1 is in """ - super().__init__(experiment_name, camera_properties) + super().__init__(**kwargs) self.duty_cycle = duty_cycle self.wave_speed = wave_speed @@ -80,14 +74,9 @@ def __init__(self, experiment_name, camera_properties, duty_cycle=0.2, wave_spee self.period_time = period / wave_speed # in minutes self.direction = direction - def initialize(self, experiment): - super().initialize(experiment) - - return [] - - def generate(self, data_dock: DataDock): + def generate(self, context): - t = data_dock.time_seconds / 60 + t = context.time / 60 xx, yy = self.get_meshgrid() center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 From 22f60a4504b92e0f3142cb0c1af6d20be621efda Mon Sep 17 00:00:00 2001 From: Harrison-Oatman <46121828+Harrison-Oatman@users.noreply.github.com> Date: Thu, 8 Jan 2026 02:25:11 -0500 Subject: [PATCH 2/3] Fix imports --- pyproject.toml | 1 + src/pyclm/core/__init__.py | 1 - tests/test_import_pyclm.py | 4 ++++ uv.lock | 19 +++++++++++++++++++ 4 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 tests/test_import_pyclm.py diff --git a/pyproject.toml b/pyproject.toml index 40972bc..6543e95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,6 +11,7 @@ dependencies = [ "h5py>=3.14.0", "natsort>=8.4.0", "notebook>=7.5.1", + "opencv-python>=4.12.0.88", "pandas-stubs==2.3.2.250827", "pymmcore-plus==0.13.7", "pymmcore==11.2.1.71.0", diff --git a/src/pyclm/core/__init__.py b/src/pyclm/core/__init__.py index 6f2ad93..0678b0f 100644 --- a/src/pyclm/core/__init__.py +++ b/src/pyclm/core/__init__.py @@ -3,7 +3,6 @@ Import modules that may be used in creating custom pattern and segmentation methods """ -from ..directories import experiment_from_toml, schedule_from_directory from .experiments import ExperimentSchedule from .manager import Manager, MicroscopeOutbox, SLMBuffer from .microscope import MicroscopeProcess diff --git a/tests/test_import_pyclm.py b/tests/test_import_pyclm.py new file mode 100644 index 0000000..e5c10fb --- /dev/null +++ b/tests/test_import_pyclm.py @@ -0,0 +1,4 @@ +def test_import_pyclm(): + import pyclm + + assert True diff --git a/uv.lock b/uv.lock index c14e604..15478dc 100644 --- a/uv.lock +++ b/uv.lock @@ -1479,6 +1479,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" }, ] +[[package]] +name = "opencv-python" +version = "4.12.0.88" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ac/71/25c98e634b6bdeca4727c7f6d6927b056080668c5008ad3c8fc9e7f8f6ec/opencv-python-4.12.0.88.tar.gz", hash = "sha256:8b738389cede219405f6f3880b851efa3415ccd674752219377353f017d2994d", size = 95373294, upload-time = "2025-07-07T09:20:52.389Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/68/3da40142e7c21e9b1d4e7ddd6c58738feb013203e6e4b803d62cdd9eb96b/opencv_python-4.12.0.88-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:f9a1f08883257b95a5764bf517a32d75aec325319c8ed0f89739a57fae9e92a5", size = 37877727, upload-time = "2025-07-07T09:13:31.47Z" }, + { url = "https://files.pythonhosted.org/packages/33/7c/042abe49f58d6ee7e1028eefc3334d98ca69b030e3b567fe245a2b28ea6f/opencv_python-4.12.0.88-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:812eb116ad2b4de43ee116fcd8991c3a687f099ada0b04e68f64899c09448e81", size = 57326471, upload-time = "2025-07-07T09:13:41.26Z" }, + { url = "https://files.pythonhosted.org/packages/62/3a/440bd64736cf8116f01f3b7f9f2e111afb2e02beb2ccc08a6458114a6b5d/opencv_python-4.12.0.88-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:51fd981c7df6af3e8f70b1556696b05224c4e6b6777bdd2a46b3d4fb09de1a92", size = 45887139, upload-time = "2025-07-07T09:13:50.761Z" }, + { url = "https://files.pythonhosted.org/packages/68/1f/795e7f4aa2eacc59afa4fb61a2e35e510d06414dd5a802b51a012d691b37/opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:092c16da4c5a163a818f120c22c5e4a2f96e0db4f24e659c701f1fe629a690f9", size = 67041680, upload-time = "2025-07-07T09:14:01.995Z" }, + { url = "https://files.pythonhosted.org/packages/02/96/213fea371d3cb2f1d537612a105792aa0a6659fb2665b22cad709a75bd94/opencv_python-4.12.0.88-cp37-abi3-win32.whl", hash = "sha256:ff554d3f725b39878ac6a2e1fa232ec509c36130927afc18a1719ebf4fbf4357", size = 30284131, upload-time = "2025-07-07T09:14:08.819Z" }, + { url = "https://files.pythonhosted.org/packages/fa/80/eb88edc2e2b11cd2dd2e56f1c80b5784d11d6e6b7f04a1145df64df40065/opencv_python-4.12.0.88-cp37-abi3-win_amd64.whl", hash = "sha256:d98edb20aa932fd8ebd276a72627dad9dc097695b3d435a4257557bbb49a79d2", size = 39000307, upload-time = "2025-07-07T09:14:16.641Z" }, +] + [[package]] name = "overrides" version = "7.7.0" @@ -1794,6 +1811,7 @@ dependencies = [ { name = "h5py" }, { name = "natsort" }, { name = "notebook" }, + { name = "opencv-python" }, { name = "pandas-stubs" }, { name = "pymmcore" }, { name = "pymmcore-plus" }, @@ -1834,6 +1852,7 @@ requires-dist = [ { name = "h5py", specifier = ">=3.14.0" }, { name = "natsort", specifier = ">=8.4.0" }, { name = "notebook", specifier = ">=7.5.1" }, + { name = "opencv-python", specifier = ">=4.12.0.88" }, { name = "pandas-stubs", specifier = "==2.3.2.250827" }, { name = "pymmcore", specifier = "==11.2.1.71.0" }, { name = "pymmcore-plus", specifier = "==0.13.7" }, From 9a3d9d72ff8f518fd83b32fd35f95b5c06c06fdc Mon Sep 17 00:00:00 2001 From: Harrison-Oatman <46121828+Harrison-Oatman@users.noreply.github.com> Date: Thu, 8 Jan 2026 02:25:35 -0500 Subject: [PATCH 3/3] Major pattern method API rework --- src/pyclm/core/patterns/bar_patterns.py | 44 ++++--- .../patterns/feedback_control_patterns.py | 60 ++++----- src/pyclm/core/patterns/ktr_patterns.py | 47 +++---- src/pyclm/core/patterns/pattern.py | 120 +++++++++++------- src/pyclm/core/patterns/pattern_process.py | 112 +++++++++------- src/pyclm/core/patterns/static_patterns.py | 10 +- src/pyclm/core/patterns/wave_patterns.py | 26 ++-- 7 files changed, 235 
insertions(+), 184 deletions(-) diff --git a/src/pyclm/core/patterns/bar_patterns.py b/src/pyclm/core/patterns/bar_patterns.py index 7956912..bfd736e 100644 --- a/src/pyclm/core/patterns/bar_patterns.py +++ b/src/pyclm/core/patterns/bar_patterns.py @@ -7,6 +7,7 @@ class BarPatternBase(PatternMethod): """ Creates a BarPattern or StationaryBarPattern depending on the requested barspeed """ + def __new__(cls, *args, **kwargs): if cls is BarPatternBase: # Check if the base class is being instantiated if kwargs.get("bar_speed") != 0: @@ -38,12 +39,11 @@ def __init__(self, duty_cycle=0.2, bar_speed=0, period=30, **kwargs): self.duty_cycle = duty_cycle self.bar_speed = 0 - self.period_space = period # in um - self.period_time = 0 # in minutes + self.period_space = period # in um + self.period_time = 0 # in minutes def generate(self, context): - - xx, yy = self.get_meshgrid() + _xx, yy = self.get_meshgrid() is_on = ((yy / self.period_space) % 1.0) < self.duty_cycle @@ -68,24 +68,24 @@ def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, **kwargs): self.duty_cycle = duty_cycle self.bar_speed = bar_speed - self.period_space = period # in um - self.period_time = period / bar_speed # in minutes + self.period_space = period # in um + self.period_time = period / bar_speed # in minutes def _get_pattern_at_time(self, t_minutes): - xx, yy = self.get_meshgrid() - is_on = ((t_minutes - (yy / self.bar_speed)) % self.period_time) < self.duty_cycle*self.period_time - return is_on.astype(np.float16) + _xx, yy = self.get_meshgrid() + is_on = ( + (t_minutes - (yy / self.bar_speed)) % self.period_time + ) < self.duty_cycle * self.period_time + return is_on.astype(np.float16) def generate(self, context): return self._get_pattern_at_time(context.time / 60) class SawToothMethod(PatternMethod): - name = "sawtooth" - def __init__(self, duty_cycle=0.2, - bar_speed=1, period=30, inverse=False, **kwargs): + def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, inverse=False, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of vertical axis containing "on" pixels @@ -103,16 +103,20 @@ def __init__(self, duty_cycle=0.2, def generate(self, context): t = context.time / 60 - xx, yy = self.get_meshgrid() + _xx, yy = self.get_meshgrid() - is_on = ((t - (yy / self.bar_speed)) % self.period_time) < self.duty_cycle * self.period_time + is_on = ( + (t - (yy / self.bar_speed)) % self.period_time + ) < self.duty_cycle * self.period_time - val = ((t - (yy / self.bar_speed)) % self.period_time) / (self.duty_cycle * self.period_time) + val = ((t - (yy / self.bar_speed)) % self.period_time) / ( + self.duty_cycle * self.period_time + ) if not self.inverse: val = 1 - val - pattern_out = (is_on*val).astype(np.float16) + pattern_out = (is_on * val).astype(np.float16) print(np.min(pattern_out), np.max(pattern_out)) @@ -120,11 +124,9 @@ def generate(self, context): class BouncingBarPattern(BarPattern): - name = "bar_bounce" - def __init__(self, duty_cycle=0.2, - bar_speed=1, period=30, t_loop=60, **kwargs): + def __init__(self, duty_cycle=0.2, bar_speed=1, period=30, t_loop=60, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of @@ -133,7 +135,9 @@ def __init__(self, duty_cycle=0.2, :param period: period in um :param t_loop: period of reversal (there and back) in minutes """ - super().__init__(duty_cycle=duty_cycle, bar_speed=bar_speed, period=period, **kwargs) + super().__init__( + duty_cycle=duty_cycle, bar_speed=bar_speed, 
period=period, **kwargs + ) self.t_loop_s = t_loop * 60 def generate(self, context): diff --git a/src/pyclm/core/patterns/feedback_control_patterns.py b/src/pyclm/core/patterns/feedback_control_patterns.py index 991f84c..3b15dc4 100644 --- a/src/pyclm/core/patterns/feedback_control_patterns.py +++ b/src/pyclm/core/patterns/feedback_control_patterns.py @@ -1,12 +1,12 @@ import numpy as np - -from .pattern import PatternMethod, AcquiredImageRequest, DataDock -from skimage.measure import regionprops, regionprops_table, label +import tifffile +from scipy.ndimage import distance_transform_edt from scipy.spatial import KDTree from scipy.stats import gaussian_kde -from scipy.ndimage import distance_transform_edt +from skimage.measure import label, regionprops, regionprops_table + +from .pattern import AcquiredImageRequest, DataDock, PatternMethod -import tifffile class PerCellPatternMethod(PatternMethod): """ @@ -18,12 +18,16 @@ class PerCellPatternMethod(PatternMethod): name = "per_cell_base" - def __init__(self, channel=None, voronoi=False, gradient=False, direction = 1, **kwargs): + def __init__( + self, channel=None, voronoi=False, gradient=False, direction=1, **kwargs + ): super().__init__(**kwargs) if channel is None: - raise AttributeError("PerCellPatternModels must be provided a " - "segmentation channel in the .toml: e.g. channel = \"638\"") + raise AttributeError( + "PerCellPatternModels must be provided a " + 'segmentation channel in the .toml: e.g. channel = "638"' + ) self.gradient = gradient self.voronoi = voronoi @@ -59,11 +63,9 @@ def prop_vector(self, prop, vec): return out def process_prop(self, prop) -> np.ndarray: - return prop.image def voronoi_rebuild(self, img): - props_table = regionprops_table(img, properties=["centroid"]) pts = np.stack([props_table["centroid-0"], props_table["centroid-1"]], axis=-1) @@ -84,7 +86,6 @@ def voronoi_rebuild(self, img): return out def generate(self, context) -> np.ndarray: - seg = context.segmentation(self.channel) if self.voronoi: @@ -100,7 +101,9 @@ def generate(self, context) -> np.ndarray: for prop in regionprops(seg): cell_stim = self.process_prop(prop) - new_img[prop.bbox[0]:prop.bbox[2], prop.bbox[1]:prop.bbox[3]] += cell_stim + new_img[prop.bbox[0] : prop.bbox[2], prop.bbox[1] : prop.bbox[3]] += ( + cell_stim + ) new_img_clamped = np.clip(new_img, 0, 1).astype(np.float16) @@ -108,15 +111,12 @@ def generate(self, context) -> np.ndarray: class RotateCcwModel(PerCellPatternMethod): - name = "rotate_ccw" def __init__(self, channel=None, **kwargs): - super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: - center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 prop_centroid = prop.centroid @@ -126,15 +126,12 @@ def process_prop(self, prop) -> np.ndarray: class MoveOutModel(PerCellPatternMethod): - name = "move_out" def __init__(self, channel=None, **kwargs): - super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: - center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 prop_centroid = prop.centroid @@ -144,15 +141,12 @@ def process_prop(self, prop) -> np.ndarray: class MoveInModel(PerCellPatternMethod): - name = "move_in" def __init__(self, channel=None, **kwargs): - super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: - center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 prop_centroid = prop.centroid @@ -162,15 +156,12 @@ def process_prop(self, prop) -> np.ndarray: class 
MoveDownModel(PerCellPatternMethod): - name = "move_down" def __init__(self, channel=None, **kwargs): - super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: - center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 prop_centroid = prop.centroid @@ -180,18 +171,15 @@ def process_prop(self, prop) -> np.ndarray: class BounceModel(PerCellPatternMethod): - name = "fb_bounce" def __init__(self, channel=None, t_loop=60, **kwargs): - self.t_loop_s = t_loop * 60 self.down = True super().__init__(channel=channel, **kwargs) def process_prop(self, prop) -> np.ndarray: - center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 prop_centroid = prop.centroid @@ -200,7 +188,6 @@ def process_prop(self, prop) -> np.ndarray: return self.prop_vector(prop, vec) def generate(self, context) -> np.ndarray: - t = context.time t = t % self.t_loop_s @@ -213,15 +200,12 @@ def generate(self, context) -> np.ndarray: class DensityModel(PerCellPatternMethod): - name = "density_gradient" def __init__(self, channel=None, **kwargs): - super().__init__(channel=channel, **kwargs) def generate(self, context) -> np.ndarray: - seg = context.segmentation(self.channel) if self.voronoi: @@ -229,14 +213,14 @@ def generate(self, context) -> np.ndarray: seg = self.voronoi_rebuild(seg) seg = seg * (px_dis < 50) - + h, w = self.pattern_shape new_img = np.zeros((int(h), int(w)), dtype=np.float16) - + density = generate_density(seg) - if (self.direction == -1): + if self.direction == -1: dy, dx = np.gradient(density) else: dy, dx = np.negative(np.gradient(density)) @@ -250,9 +234,11 @@ def generate(self, context) -> np.ndarray: vec = (np.sin(vec), np.cos(vec)) cell_stim = self.prop_vector(prop, vec) - - new_img[prop.bbox[0]:prop.bbox[2], prop.bbox[1]:prop.bbox[3]] += cell_stim + + new_img[prop.bbox[0] : prop.bbox[2], prop.bbox[1] : prop.bbox[3]] += ( + cell_stim + ) new_img_clamped = np.clip(new_img, 0, 1).astype(np.float16) - return new_img_clamped \ No newline at end of file + return new_img_clamped diff --git a/src/pyclm/core/patterns/ktr_patterns.py b/src/pyclm/core/patterns/ktr_patterns.py index adbf359..ef8e0b9 100644 --- a/src/pyclm/core/patterns/ktr_patterns.py +++ b/src/pyclm/core/patterns/ktr_patterns.py @@ -1,20 +1,21 @@ import numpy as np - -from .pattern import PatternMethod, AcquiredImageRequest, DataDock -from skimage.measure import regionprops import tifffile +from skimage.measure import regionprops +from .pattern import AcquiredImageRequest, DataDock, PatternMethod -class NucleusControlMethod(PatternMethod): +class NucleusControlMethod(PatternMethod): name = "nucleus_control_base" def __init__(self, channel=None, **kwargs): super().__init__(**kwargs) if channel is None: - raise AttributeError("NucleusControlMethod must be provided a " - "segmentation channel in the .toml: e.g. nuc_channel = \"545\"") + raise AttributeError( + "NucleusControlMethod must be provided a " + 'segmentation channel in the .toml: e.g. 
nuc_channel = "545"' + ) self.channel = channel self.add_requirement(channel, raw=True, seg=True) @@ -23,7 +24,6 @@ def process_prop(self, prop): return prop.image def generate(self, context) -> np.ndarray: - seg = context.segmentation(self.channel) raw = context.raw(self.channel) @@ -34,7 +34,9 @@ def generate(self, context) -> np.ndarray: for prop in regionprops(seg, intensity_image=raw): cell_stim = self.process_prop(prop) - new_img[prop.bbox[0]:prop.bbox[2], prop.bbox[1]:prop.bbox[3]] += cell_stim + new_img[prop.bbox[0] : prop.bbox[2], prop.bbox[1] : prop.bbox[3]] += ( + cell_stim + ) new_img_clamped = np.clip(new_img, 0, 1).astype(np.float16) @@ -42,11 +44,9 @@ def generate(self, context) -> np.ndarray: class BinaryNucleusClampModel(NucleusControlMethod): - name = "binary_nucleus_clamp" def __init__(self, channel, clamp_target, **kwargs): - super().__init__(channel=channel, **kwargs) self.clamp_target = clamp_target @@ -59,12 +59,16 @@ def process_prop(self, prop): class CenteredImageModel(NucleusControlMethod): - name = "centered_image" - def __init__(self, channel="545", tif_path=None, - min_intensity=2000, max_intensity=5000, **kwargs): - + def __init__( + self, + channel="545", + tif_path=None, + min_intensity=2000, + max_intensity=5000, + **kwargs, + ): super().__init__(channel=channel, **kwargs) self.img = tifffile.imread(tif_path) @@ -77,7 +81,6 @@ def __init__(self, channel="545", tif_path=None, # def def make_target_image(self): - img = self.img h, w = self.pattern_shape @@ -90,7 +93,9 @@ def make_target_image(self): padding_w_left = (w - img.shape[1]) // 2 padding_w_right = w - (padding_w_left + img.shape[1]) - padded_img = np.pad(img, ((padding_h_up, padding_h_down), (padding_w_left, padding_w_right))) + padded_img = np.pad( + img, ((padding_h_up, padding_h_down), (padding_w_left, padding_w_right)) + ) padded_img = np.array(padded_img, dtype=np.float16) max_v = np.max(img) @@ -99,14 +104,15 @@ def make_target_image(self): padded_img = (padded_img - min_v) / (max_v - min_v) padded_img = np.clip(padded_img, 0, 1) - padded_img = (padded_img * (self.max_intensity - self.min_intensity)) + self.min_intensity + padded_img = ( + padded_img * (self.max_intensity - self.min_intensity) + ) + self.min_intensity print(padded_img.shape) self.target_image = padded_img def get_target_intensity(self, prop): - y, x = prop.centroid y = round(y) @@ -115,7 +121,6 @@ def get_target_intensity(self, prop): return self.target_image[y, x] def process_prop(self, prop): - target = self.get_target_intensity(prop) if prop.intensity_mean > target: @@ -124,7 +129,6 @@ def process_prop(self, prop): return prop.image def generate(self, context) -> np.ndarray: - if self.target_image is None: self.make_target_image() @@ -132,17 +136,14 @@ def generate(self, context) -> np.ndarray: class GlobalCycleModel(NucleusControlMethod): - name = "global_cycle" def __init__(self, channel, period_m=10, **kwargs): - super().__init__(channel=channel, **kwargs) self.period_s = period_m * 60 def generate(self, context) -> np.ndarray: - t = context.time is_on = ((t // self.period_s) % 2) == 0 diff --git a/src/pyclm/core/patterns/pattern.py b/src/pyclm/core/patterns/pattern.py index 76628c0..a5a840e 100644 --- a/src/pyclm/core/patterns/pattern.py +++ b/src/pyclm/core/patterns/pattern.py @@ -1,25 +1,39 @@ -from ..experiments import Experiment -from ..datatypes import AcquisitionData, SegmentationData import logging -from typing import NamedTuple, Union, Any -from uuid import uuid4, UUID from collections import defaultdict from 
pathlib import Path -from h5py import File +from typing import Any, NamedTuple, Union +from uuid import UUID, uuid4 + import numpy as np +from h5py import File from natsort import natsorted +from ..datatypes import AcquisitionData, SegmentationData +from ..experiments import Experiment + logger = logging.getLogger(__name__) -ROI = NamedTuple("ROI", [("x_offset", int), ("y_offset", int), ("width", int), ("height", int)]) -CameraProperties = NamedTuple("CameraProperties", [("roi", ROI), ("pixel_size_um", float)]) -AcquiredImageRequest = NamedTuple("AcquiredImageRequest", [("id", UUID), ("needs_raw", bool), ("needs_seg", bool)]) +class ROI(NamedTuple): + x_offset: int + y_offset: int + width: int + height: int -class DataDock: - def __init__(self, time_seconds, requirements: list[AcquiredImageRequest]): +class CameraProperties(NamedTuple): + roi: ROI + pixel_size_um: float + + +class AcquiredImageRequest(NamedTuple): + id: UUID + needs_raw: bool + needs_seg: bool + +class DataDock: + def __init__(self, time_seconds, requirements: list[AcquiredImageRequest]): self.time_seconds = time_seconds self.requirements = requirements self.data = defaultdict(dict) @@ -34,31 +48,32 @@ def __init__(self, time_seconds, requirements: list[AcquiredImageRequest]): self.complete = self.check_complete() def add_raw(self, data: AcquisitionData): - channel_id = data.channel_id # ensure channel id was expected assert channel_id in self.data, "unexpected data passed to pattern module" # ensure raw data was expected and not already passed assert "raw" in self.data[channel_id], "raw data being passed, but not expected" - assert self.data[channel_id]["raw"] is None, f"expected none, found {self.data[channel_id]['raw']}" + assert self.data[channel_id]["raw"] is None, ( + f"expected none, found {self.data[channel_id]['raw']}" + ) self.data[channel_id]["raw"] = data def add_seg(self, data: SegmentationData): - channel_id = data.channel_id # ensure channel id was expected assert channel_id in self.data, "unexpected data passed to pattern module" # ensure raw data was expected and not already passed assert "seg" in self.data[channel_id], "seg data being passed, but not expected" - assert self.data[channel_id]["seg"] is None, f"expected none, found {self.data[channel_id]['seg']}" + assert self.data[channel_id]["seg"] is None, ( + f"expected none, found {self.data[channel_id]['seg']}" + ) self.data[channel_id]["seg"] = data def get_awaiting(self): - awaiting = [] for channel in self.data: @@ -71,7 +86,6 @@ def get_awaiting(self): return awaiting def check_complete(self): - return len(self.get_awaiting()) == 0 @@ -79,7 +93,9 @@ class PatternContext: def __init__(self, data_dock: DataDock, experiment: Experiment): self._dock = data_dock self._experiment = experiment - self._channel_map = {name: ch.channel_id for name, ch in experiment.channels.items()} + self._channel_map = { + name: ch.channel_id for name, ch in experiment.channels.items() + } @property def time(self) -> float: @@ -94,7 +110,9 @@ def raw(self, channel_name: str) -> np.ndarray: """Get raw image for a channel.""" cid = self._get_channel_id(channel_name) if cid not in self._dock.data or "raw" not in self._dock.data[cid]: - raise ValueError(f"Raw data for channel '{channel_name}' was not requested.") + raise ValueError( + f"Raw data for channel '{channel_name}' was not requested." 
+ ) data = self._dock.data[cid]["raw"] return data.data if data else None @@ -102,32 +120,42 @@ def segmentation(self, channel_name: str) -> np.ndarray: """Get segmentation mask for a channel.""" cid = self._get_channel_id(channel_name) if cid not in self._dock.data or "seg" not in self._dock.data[cid]: - raise ValueError(f"Segmentation data for channel '{channel_name}' was not requested.") + raise ValueError( + f"Segmentation data for channel '{channel_name}' was not requested." + ) data = self._dock.data[cid]["seg"] return data.data if data else None - class PatternMethod: - name = "base" - def __init__(self, experiment_name=None, camera_properties: CameraProperties=None, **kwargs): + def __init__( + self, experiment_name=None, camera_properties: CameraProperties = None, **kwargs + ): # Support legacy init where these are passed self.experiment_name = experiment_name self.camera_properties = camera_properties if camera_properties: self.pixel_size_um = camera_properties.pixel_size_um - self.pattern_shape = (camera_properties.roi.height, camera_properties.roi.width) + self.pattern_shape = ( + camera_properties.roi.height, + camera_properties.roi.width, + ) else: self.pixel_size_um = 1.0 - self.pattern_shape = (100, 100) # Default placeholders - + self.pattern_shape = (100, 100) # Default placeholders + self.binning = 1 - self._requirements_list = [] # List of (channel_name, raw, seg) + self._requirements_list = [] # List of (channel_name, raw, seg) self._experiment_ref = None - def configure_system(self, experiment_name: str, camera_properties: CameraProperties, experiment: Experiment): + def configure_system( + self, + experiment_name: str, + camera_properties: CameraProperties, + experiment: Experiment, + ): """Called by the system to inject dependencies.""" self.experiment_name = experiment_name self.camera_properties = camera_properties @@ -140,7 +168,6 @@ def add_requirement(self, channel_name: str, raw: bool = False, seg: bool = Fals self._requirements_list.append((channel_name, raw, seg)) def initialize(self, experiment: Experiment) -> list[AcquiredImageRequest]: - # If user used add_requirement, process them reqs = [] for name, needs_raw, needs_seg in self._requirements_list: @@ -152,7 +179,7 @@ def initialize(self, experiment: Experiment) -> list[AcquiredImageRequest]: binning = experiment.stimulation.binning self.update_binning(binning) - + return reqs def get_meshgrid(self) -> tuple[np.ndarray, np.ndarray]: @@ -165,45 +192,55 @@ def get_meshgrid(self) -> tuple[np.ndarray, np.ndarray]: return xx, yy - def generate(self, data_dock: Union[DataDock, PatternContext]) -> np.ndarray: + def generate(self, data_dock: DataDock | PatternContext) -> np.ndarray: # If passed PatternContext, user is using new API. # But if user implemented old generate(self, data_dock: DataDock), we need to support that. # This method is called by the system. 
raise NotImplementedError def update_binning(self, binning: int): - binning_rescale = binning / self.binning self.pixel_size_um = self.pixel_size_um * binning_rescale - self.pattern_shape = (self.pattern_shape[0] // binning_rescale, self.pattern_shape[1] // binning_rescale) - - logger.info(f"model {self.name} updated pixel size (um) to {self.pixel_size_um}") + self.pattern_shape = ( + self.pattern_shape[0] // binning_rescale, + self.pattern_shape[1] // binning_rescale, + ) + + logger.info( + f"model {self.name} updated pixel size (um) to {self.pixel_size_um}" + ) logger.info(f"model {self.name} updated pattern_shape to {self.pattern_shape}") self.binning = binning class PatternMethodReturnsSLM(PatternMethod): - pass class PatternReview(PatternMethodReturnsSLM): - name = "pattern_review" - def __init__(self, experiment_name, camera_properties, h5fp: Union[str, Path] = None, channel="545", **kwargs): + def __init__( + self, + experiment_name, + camera_properties, + h5fp: str | Path | None = None, + channel="545", + **kwargs, + ): super().__init__(experiment_name, camera_properties) if h5fp is None: - raise ValueError("pattern_review model requires specifying h5fp (h5 filepath)") + raise ValueError( + "pattern_review model requires specifying h5fp (h5 filepath)" + ) self.fp = Path(h5fp) self.channel_name = f"channel_{channel}" with File(str(self.fp), "r") as f: - keys = list(f.keys()) self.keys = natsorted(keys) @@ -212,21 +249,16 @@ def initialize(self, experiment: Experiment) -> list[AcquiredImageRequest]: return [] def generate(self, data_dock: DataDock) -> np.ndarray: - with File(str(self.fp), "r") as f: - while len(self.keys) > 0: - key = self.keys.pop(0) if self.channel_name in f[key]: - return np.array(f[key]["stim_aq"]["dmd"]) return np.array([]) - if __name__ == "__main__": print(type(PatternMethod)) air = AcquiredImageRequest(uuid4(), True, False) diff --git a/src/pyclm/core/patterns/pattern_process.py b/src/pyclm/core/patterns/pattern_process.py index 585fab8..e934223 100644 --- a/src/pyclm/core/patterns/pattern_process.py +++ b/src/pyclm/core/patterns/pattern_process.py @@ -1,23 +1,36 @@ import logging - from threading import Event +from typing import ClassVar -from ..queues import AllQueues -from ..datatypes import CameraPattern, AcquisitionData, SegmentationData +from ..datatypes import AcquisitionData, CameraPattern, SegmentationData from ..experiments import Experiment from ..messages import Message -from .pattern import PatternReview, PatternMethod, PatternMethodReturnsSLM, DataDock, AcquiredImageRequest, CameraProperties, PatternContext -from .bar_patterns import BouncingBarPattern, BarPatternBase, SawToothMethod +from ..queues import AllQueues +from .bar_patterns import BarPatternBase, BouncingBarPattern, SawToothMethod +from .feedback_control_patterns import ( + BounceModel, + MoveDownModel, + MoveInModel, + MoveOutModel, + RotateCcwModel, +) +from .ktr_patterns import BinaryNucleusClampModel, CenteredImageModel, GlobalCycleModel +from .pattern import ( + AcquiredImageRequest, + CameraProperties, + DataDock, + PatternContext, + PatternMethod, + PatternMethodReturnsSLM, + PatternReview, +) from .static_patterns import CirclePattern, FullOnPattern -from .feedback_control_patterns import RotateCcwModel, MoveInModel, MoveDownModel, MoveOutModel, BounceModel -from .ktr_patterns import BinaryNucleusClampModel, GlobalCycleModel, CenteredImageModel logger = logging.getLogger(__name__) class PatternProcess: - - known_models = { + known_models: ClassVar = { "circle": CirclePattern, 
"bar": BarPatternBase, "pattern_review": PatternReview, @@ -34,7 +47,7 @@ class PatternProcess: "centered_image": CenteredImageModel, } - def __init__(self, aq: AllQueues, stop_event: Event = None): + def __init__(self, aq: AllQueues, stop_event: Event | None = None): self.stop_event = stop_event self.inbox = aq.manager_to_pattern self.manager = aq.pattern_to_manager @@ -42,8 +55,8 @@ def __init__(self, aq: AllQueues, stop_event: Event = None): self.from_seg = aq.seg_to_pattern self.from_raw = aq.outbox_to_pattern - - self.stream_count = 0 + + self.stream_count = 0 self.camera_properties = None self.initialized = False @@ -57,13 +70,16 @@ def initialize(self, camera_properties: CameraProperties): self.initialized = True def request_method(self, experiment: Experiment) -> list[AcquiredImageRequest]: - method_name = experiment.pattern.method_name model_class: type = self.known_models.get(method_name) - assert model_class is not None, f"method {method_name} is not a registered method" - assert issubclass(model_class, PatternMethod), f"{method_name} is not a PatternMethod" + assert model_class is not None, ( + f"method {method_name} is not a registered method" + ) + assert issubclass(model_class, PatternMethod), ( + f"{method_name} is not a PatternMethod" + ) experiment_name = experiment.experiment_name method_kwargs = experiment.pattern.kwargs @@ -72,13 +88,14 @@ def request_method(self, experiment: Experiment) -> list[AcquiredImageRequest]: self.models[experiment_name] = model - logger.info(f"initializing pattern model \"{method_name}\"") + logger.info(f'initializing pattern model "{method_name}"') return model.initialize(experiment) - def register_method(self, model: type, name: str = None): - - assert issubclass(model, PatternMethod), "model must be a subclass of PatternMethod" + def register_method(self, model: type, name: str | None = None): + assert issubclass(model, PatternMethod), ( + "model must be a subclass of PatternMethod" + ) model_name = model.name if name is not None: @@ -90,20 +107,23 @@ def register_method(self, model: type, name: str = None): self.known_models[model_name] = model def run_model(self, experiment_name, dockname): - data_dock = self.docks.pop(dockname) model = self.models.get(experiment_name, None) - assert isinstance(model, PatternMethod), f"self.models[{'experiment_name'}] is not a PatternMethod" + assert isinstance(model, PatternMethod), ( + f"self.models[{'experiment_name'}] is not a PatternMethod" + ) # Create context wrapper # We assume _experiment_ref is available. If not, we might crash, which is acceptable for alpha breakage. # Ideally, we ensure configure_system is called. if model._experiment_ref is None: - raise RuntimeError(f"Model {model.name} for {experiment_name} was not properly configured with an experiment reference.") - + raise RuntimeError( + f"Model {model.name} for {experiment_name} was not properly configured with an experiment reference." 
+ ) + context = PatternContext(data_dock, model._experiment_ref) if isinstance(model, PatternMethodReturnsSLM): @@ -112,25 +132,26 @@ def run_model(self, experiment_name, dockname): else: pattern = model.generate(context) - self.slm.put(CameraPattern(experiment_name, pattern, slm_coords=False, binning=model.binning)) + self.slm.put( + CameraPattern( + experiment_name, pattern, slm_coords=False, binning=model.binning + ) + ) def dock_string(self, experiment_name, t): return f"{experiment_name}_{t:05d}" def check(self, experiment_name, dockname): - dock: DataDock = self.docks.get(dockname) if dock.check_complete(): self.run_model(experiment_name, dockname) def handle_message(self, message: Message): - match message.message: - case "close": - return False - + return False + case "stream_close": print("pattern received stream close") @@ -138,13 +159,13 @@ def handle_message(self, message: Message): if self.stream_count >= 2: # Signal SLMBuffer that we are done from ..messages import StreamCloseMessage + out_msg = StreamCloseMessage() self.slm.put(out_msg) return True return False case "request_pattern": - assert isinstance(message, RequestPattern) req = message.requirements @@ -171,7 +192,7 @@ def process(self): if self.stop_event and self.stop_event.is_set(): print("force stopping pattern process") break - + if not self.inbox.empty(): msg = self.inbox.get() @@ -180,9 +201,10 @@ def process(self): if not self.from_raw.empty(): data = self.from_raw.get() - + if isinstance(data, Message): - if self.handle_message(data): return + if self.handle_message(data): + return else: assert isinstance(data, AcquisitionData) name = data.event.experiment_name @@ -196,20 +218,21 @@ def process(self): if not self.from_seg.empty(): data = self.from_seg.get() - + if isinstance(data, Message): - if self.handle_message(data): return + if self.handle_message(data): + return else: assert isinstance(data, SegmentationData) name = data.event.experiment_name t_index = data.event.t_index - + dockname = self.dock_string(name, t_index) - + print(f"seg found {dockname}") - + self.docks[dockname].add_seg(data) - + self.check(name, dockname) @@ -222,11 +245,14 @@ class RequestPattern(Message): message = "request_pattern" - def __init__(self, t_index, time_sec, experiment_name: str, requirements: list[AcquiredImageRequest]): - + def __init__( + self, + t_index, + time_sec, + experiment_name: str, + requirements: list[AcquiredImageRequest], + ): self.t_index = t_index self.time_sec = time_sec self.experiment_name = experiment_name self.requirements = requirements - - diff --git a/src/pyclm/core/patterns/static_patterns.py b/src/pyclm/core/patterns/static_patterns.py index 4d1bb11..4900718 100644 --- a/src/pyclm/core/patterns/static_patterns.py +++ b/src/pyclm/core/patterns/static_patterns.py @@ -16,17 +16,18 @@ def __init__(self, rad=1, **kwargs): self.rad = rad def generate(self, context): - h, w = self.pattern_shape - center_x = self.pixel_size_um * w / 2. - center_y = self.pixel_size_um * h / 2. 
+ center_x = self.pixel_size_um * w / 2.0 + center_y = self.pixel_size_um * h / 2.0 xx, yy = self.get_meshgrid() print(h, w) - return (((xx - center_x)**2 + (yy - center_y)**2) < (self.rad**2)).astype(np.float16) + return (((xx - center_x) ** 2 + (yy - center_y) ** 2) < (self.rad**2)).astype( + np.float16 + ) class FullOnPattern(PatternMethod): @@ -40,7 +41,6 @@ def __init__(self, **kwargs): super().__init__(**kwargs) def generate(self, context): - h, w = self.pattern_shape pattern = np.ones((int(h), int(w)), dtype=np.float16) diff --git a/src/pyclm/core/patterns/wave_patterns.py b/src/pyclm/core/patterns/wave_patterns.py index d96dccf..a8b7cd7 100644 --- a/src/pyclm/core/patterns/wave_patterns.py +++ b/src/pyclm/core/patterns/wave_patterns.py @@ -7,6 +7,7 @@ class WavePatternBase(PatternMethod): """ Creates a WavePattern or StationaryWavePattern depending on the requested wavespeed """ + def __new__(cls, *args, **kwargs): if cls is WavePatternBase: # Check if the base class is being instantiated if kwargs.get("wave_speed") != 0: @@ -19,6 +20,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) print(f"Initializing {self.__class__.__name__}") + class StationaryWavePattern(WavePatternBase): """ Wave coming out from the center @@ -37,14 +39,13 @@ def __init__(self, duty_cycle=0.2, wave_speed=0, period=30, **kwargs): self.duty_cycle = duty_cycle self.wave_speed = 0 - self.period_space = period # in um - self.period_time = 0 # in minutes + self.period_space = period # in um + self.period_time = 0 # in minutes def generate(self, context): - xx, yy = self.get_meshgrid() center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 - distance = np.sqrt((xx - center_x)**2 + (yy - center_y)**2) + distance = np.sqrt((xx - center_x) ** 2 + (yy - center_y) ** 2) is_on = ((distance / self.period_space) % 1.0) < self.duty_cycle @@ -58,31 +59,32 @@ class WavePattern(WavePatternBase): name = "wave" - def __init__(self, duty_cycle=0.2, wave_speed=1, period=30, direction = 1, **kwargs): + def __init__(self, duty_cycle=0.2, wave_speed=1, period=30, direction=1, **kwargs): """ :param duty_cycle: fraction of time spent on (float 0-1), and consequently fraction of radial axis containing "on" pixels :param wave_speed: speed in um/min :param period: period in um - :param direction: movement in/out relative to the center. 1 is out; -1 is in + :param direction: movement in/out relative to the center. 1 is out; -1 is in """ super().__init__(**kwargs) self.duty_cycle = duty_cycle self.wave_speed = wave_speed - self.period_space = period # in um - self.period_time = period / wave_speed # in minutes + self.period_space = period # in um + self.period_time = period / wave_speed # in minutes self.direction = direction def generate(self, context): - t = context.time / 60 xx, yy = self.get_meshgrid() center_y, center_x = self.pattern_shape[0] / 2, self.pattern_shape[1] / 2 - distance = np.sqrt((xx - center_x)**2 + (yy - center_y)**2) + distance = np.sqrt((xx - center_x) ** 2 + (yy - center_y) ** 2) - is_on = (((t*self.direction) - (distance / self.wave_speed)) % self.period_time) < self.duty_cycle*self.period_time + is_on = ( + ((t * self.direction) - (distance / self.wave_speed)) % self.period_time + ) < self.duty_cycle * self.period_time return is_on.astype(np.float16) @@ -159,4 +161,4 @@ def generate(self, context): # data_dock.time_seconds = t -# return super().generate(data_dock) \ No newline at end of file +# return super().generate(data_dock)
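
Note for readers adapting their own pattern methods to the reworked API introduced by this series: a custom method now declares its data needs with add_requirement() in __init__ and reads them back through the PatternContext passed to generate(). The sketch below is illustrative only and is not part of these patches; the class, its registered name, and its parameters (PulsedCellMask, period_m, duty_cycle) are hypothetical.

    import numpy as np

    from pyclm.core.patterns.pattern import PatternMethod


    class PulsedCellMask(PatternMethod):
        """Stimulate segmented cells only during the first `duty_cycle`
        fraction of each period (period_m, in minutes). Example only."""

        name = "pulsed_cell_mask"

        def __init__(self, channel=None, period_m=10, duty_cycle=0.5, **kwargs):
            super().__init__(**kwargs)
            if channel is None:
                raise AttributeError(
                    "pulsed_cell_mask requires a segmentation channel "
                    'in the .toml, e.g. channel = "638"'
                )
            self.channel = channel
            self.period_s = period_m * 60
            self.duty_cycle = duty_cycle
            # Declare required data; the pattern process delivers it via the context.
            self.add_requirement(channel, seg=True)

        def generate(self, context) -> np.ndarray:
            # context.time is elapsed time in seconds.
            phase = (context.time % self.period_s) / self.period_s
            if phase >= self.duty_cycle:
                h, w = self.pattern_shape
                return np.zeros((int(h), int(w)), dtype=np.float16)
            # context.segmentation() returns the labeled mask for the channel.
            seg = context.segmentation(self.channel)
            return (seg > 0).astype(np.float16)

A subclass like this would be made available to experiments by registering it with PatternProcess.register_method() (or adding it to known_models), after which a .toml can select it by name and supply the keyword arguments shown in __init__.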