From 8f4c3a88a4baaf6a49185e9c245fecf716802e75 Mon Sep 17 00:00:00 2001 From: Jane Van Lam <75lam@cua.edu> Date: Wed, 18 Mar 2026 13:37:29 -0400 Subject: [PATCH] replace preadator, update packages for python 3.13 update gitignore update Dockerfile, pyproject.toml --- .gitignore | 2 + .../binary-operations-tool/.bumpversion.cfg | 2 +- .../images/binary-operations-tool/README.md | 2 +- .../images/binary-operations-tool/VERSION | 2 +- .../images/binary-operations-tool/plugin.json | 4 +- .../binary-operations-tool/pyproject.toml | 2 +- .../images/binary_operations/__init__.py | 2 +- .../image-calculator-tool/.bumpversion.cfg | 2 +- .../images/image-calculator-tool/README.md | 2 +- .../images/image-calculator-tool/VERSION | 2 +- .../images/image-calculator-tool/plugin.json | 4 +- .../image-calculator-tool/pyproject.toml | 2 +- .../images/image_calculator/__init__.py | 2 +- .../pyproject.toml | 18 ++ .../polus-apply-flatfield-plugin/src/main.py | 137 +++++++------ .../src/requirements.txt | 5 +- .../src/autocrop_utils/helpers.py | 1 - .../polus-ftl-label-plugin/pyproject.toml | 12 ++ .../images/polus-ftl-label-plugin/src/main.py | 180 +++++++++++++++++ .../src/requirements.txt | 5 + .../pyproject.toml | 17 ++ .../src/__init__.py | 1 + .../pyproject.toml | 15 ++ .../src/__init__.py | 1 + .../src/main.py | 183 +++++++++--------- .../src/requirements.txt | 3 +- .../polus-stack-z-slice-plugin/pyproject.toml | 18 ++ .../polus-stack-z-slice-plugin/src/main.py | 131 +++++++------ .../src/requirements.txt | 3 +- .../{bumpversion.cfg => .bumpversion.cfg} | 4 +- .../remove-border-objects-plugin/Dockerfile | 23 ++- .../remove-border-objects-plugin/README.md | 7 +- .../remove-border-objects-plugin/VERSION | 2 +- .../build-docker.sh | 2 +- .../package-release.sh | 2 +- .../remove-border-objects-plugin/plugin.json | 6 +- .../pyproject.toml | 20 ++ .../run-plugin.sh | 3 +- .../src/__init__.py | 1 + .../src/functions.py | 69 ++++--- .../remove-border-objects-plugin/src/main.py | 142 
++++++++------ .../src/requirements.txt | 2 +- .../tests/__init__.py | 1 + .../tests/test_main.py | 123 ++++++++---- .../tests/version_test.py | 71 ++++--- 45 files changed, 810 insertions(+), 428 deletions(-) create mode 100644 transforms/images/polus-apply-flatfield-plugin/pyproject.toml create mode 100644 transforms/images/polus-ftl-label-plugin/pyproject.toml create mode 100644 transforms/images/polus-ftl-label-plugin/src/main.py create mode 100644 transforms/images/polus-ftl-label-plugin/src/requirements.txt create mode 100644 transforms/images/polus-image-registration-plugin/pyproject.toml create mode 100644 transforms/images/polus-image-registration-plugin/src/__init__.py create mode 100644 transforms/images/polus-intensity-projection-plugin/pyproject.toml create mode 100644 transforms/images/polus-intensity-projection-plugin/src/__init__.py create mode 100644 transforms/images/polus-stack-z-slice-plugin/pyproject.toml rename transforms/images/remove-border-objects-plugin/{bumpversion.cfg => .bumpversion.cfg} (75%) create mode 100644 transforms/images/remove-border-objects-plugin/pyproject.toml create mode 100644 transforms/images/remove-border-objects-plugin/src/__init__.py create mode 100644 transforms/images/remove-border-objects-plugin/tests/__init__.py diff --git a/.gitignore b/.gitignore index db92e2afa..b9165af77 100644 --- a/.gitignore +++ b/.gitignore @@ -184,6 +184,8 @@ uv.lock #husky node_modules +#uv +transforms/images/*/uv.lock # uv lockfiles (generated; ignore in all nested projects) **/uv.lock diff --git a/transforms/images/binary-operations-tool/.bumpversion.cfg b/transforms/images/binary-operations-tool/.bumpversion.cfg index 17dc81028..501eb51a8 100644 --- a/transforms/images/binary-operations-tool/.bumpversion.cfg +++ b/transforms/images/binary-operations-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.5.6-dev0 +current_version = 0.5.4-dev0 commit = True tag = False parse = 
(?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? diff --git a/transforms/images/binary-operations-tool/README.md b/transforms/images/binary-operations-tool/README.md index 2d0409816..2705954b8 100644 --- a/transforms/images/binary-operations-tool/README.md +++ b/transforms/images/binary-operations-tool/README.md @@ -1,4 +1,4 @@ -# Binary Operations Plugin (v0.5.6-dev0) +# Binary Operations Plugin (v0.5.4-dev0) This WIPP plugin does Morphological Image Processing on binary and instance labelled images. The operations available are: diff --git a/transforms/images/binary-operations-tool/VERSION b/transforms/images/binary-operations-tool/VERSION index 66ad49354..d53e4a395 100644 --- a/transforms/images/binary-operations-tool/VERSION +++ b/transforms/images/binary-operations-tool/VERSION @@ -1 +1 @@ -0.5.6-dev0 +0.5.4-dev0 diff --git a/transforms/images/binary-operations-tool/plugin.json b/transforms/images/binary-operations-tool/plugin.json index 597776cb7..17777c117 100644 --- a/transforms/images/binary-operations-tool/plugin.json +++ b/transforms/images/binary-operations-tool/plugin.json @@ -1,7 +1,7 @@ { "name": "Binary Operations Plugin", - "version": "0.5.6-dev0", - "containerId": "polusai/binary-operations-tool:0.5.6-dev0", + "version": "0.5.4-dev0", + "containerId": "polusai/binary-operations-tool:0.5.4-dev0", "title": "Binary Operations Plugin", "description": "Everything you need to start a WIPP plugin.", "author": "Nick Schaub (nick.schaub@nih.gov), Madhuri Vihani", diff --git a/transforms/images/binary-operations-tool/pyproject.toml b/transforms/images/binary-operations-tool/pyproject.toml index a69ce3195..423dc4001 100644 --- a/transforms/images/binary-operations-tool/pyproject.toml +++ b/transforms/images/binary-operations-tool/pyproject.toml @@ -5,7 +5,7 @@ requires-python = ">=3.11" [tool.poetry] name = "polus-images-transforms-images-binary-operations" -version = "0.5.6-dev0" +version = "0.5.4-dev0" description = "" authors = ["nicholas-schaub ", 
"Madhuri Vihani"] readme = "README.md" diff --git a/transforms/images/binary-operations-tool/src/polus/images/transforms/images/binary_operations/__init__.py b/transforms/images/binary-operations-tool/src/polus/images/transforms/images/binary_operations/__init__.py index 82e579880..0fdae7e29 100644 --- a/transforms/images/binary-operations-tool/src/polus/images/transforms/images/binary_operations/__init__.py +++ b/transforms/images/binary-operations-tool/src/polus/images/transforms/images/binary_operations/__init__.py @@ -1,5 +1,5 @@ """Binary operations tool.""" -__version__ = "0.5.6-dev0" +__version__ = "0.5.4-dev0" from . import utils from .binops import Operation diff --git a/transforms/images/image-calculator-tool/.bumpversion.cfg b/transforms/images/image-calculator-tool/.bumpversion.cfg index a2406afe1..850c717a6 100644 --- a/transforms/images/image-calculator-tool/.bumpversion.cfg +++ b/transforms/images/image-calculator-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.2.4-dev0 +current_version = 0.2.3-dev0 commit = False tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? diff --git a/transforms/images/image-calculator-tool/README.md b/transforms/images/image-calculator-tool/README.md index 32d2b606c..b75227bb9 100644 --- a/transforms/images/image-calculator-tool/README.md +++ b/transforms/images/image-calculator-tool/README.md @@ -1,4 +1,4 @@ -# Image Calculator (v0.2.4-dev0) +# Image Calculator (v0.2.3-dev0) This plugin performs pixel-wise operations between two image collections. For example, images in one image collection can be subtracted from images in another collection. 
diff --git a/transforms/images/image-calculator-tool/VERSION b/transforms/images/image-calculator-tool/VERSION index 4f5e9fba8..398833448 100644 --- a/transforms/images/image-calculator-tool/VERSION +++ b/transforms/images/image-calculator-tool/VERSION @@ -1 +1 @@ -0.2.4-dev0 +0.2.3-dev0 diff --git a/transforms/images/image-calculator-tool/plugin.json b/transforms/images/image-calculator-tool/plugin.json index 0d205b30d..d53c6ae01 100644 --- a/transforms/images/image-calculator-tool/plugin.json +++ b/transforms/images/image-calculator-tool/plugin.json @@ -1,6 +1,6 @@ { "name": "Image Calculator", - "version": "0.2.4-dev0", + "version": "0.2.3-dev0", "title": "Image Calculator", "description": "Perform simple mathematical operations on images.", "author": "Nick Schaub (nick.schaub@nih.gov)", @@ -8,7 +8,7 @@ "repository": "https://github.com/labshare/polus-plugins", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/image-calculator-tool:0.2.4-dev0", + "containerId": "polusai/image-calculator-tool:0.2.3-dev0", "baseCommand": [ "python3", "-m", diff --git a/transforms/images/image-calculator-tool/pyproject.toml b/transforms/images/image-calculator-tool/pyproject.toml index 0473cff99..8c2148178 100644 --- a/transforms/images/image-calculator-tool/pyproject.toml +++ b/transforms/images/image-calculator-tool/pyproject.toml @@ -5,7 +5,7 @@ requires-python = ">=3.11" [tool.poetry] name = "polus-images-transforms-images-image-calculator" -version = "0.2.4-dev0" +version = "0.2.3-dev0" description = "" authors = [ "Nicholas Schaub ", diff --git a/transforms/images/image-calculator-tool/src/polus/images/transforms/images/image_calculator/__init__.py b/transforms/images/image-calculator-tool/src/polus/images/transforms/images/image_calculator/__init__.py index 8dcbcbb6f..a429c66c1 100644 --- a/transforms/images/image-calculator-tool/src/polus/images/transforms/images/image_calculator/__init__.py +++ 
b/transforms/images/image-calculator-tool/src/polus/images/transforms/images/image_calculator/__init__.py @@ -6,4 +6,4 @@ from .calculator import Operation from .calculator import process_image -__version__ = "0.2.4-dev0" +__version__ = "0.2.3-dev0" diff --git a/transforms/images/polus-apply-flatfield-plugin/pyproject.toml b/transforms/images/polus-apply-flatfield-plugin/pyproject.toml new file mode 100644 index 000000000..e7010de70 --- /dev/null +++ b/transforms/images/polus-apply-flatfield-plugin/pyproject.toml @@ -0,0 +1,18 @@ +[project] +name = "polus-apply-flatfield-plugin" +version = "1.2.0" +requires-python = ">=3.13" +dependencies = [ + "bfio>=2.5.0", + "filepattern>=2.2.1", +] + +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.package-dir] +"" = "src" + +[tool.setuptools] +py-modules = ["main"] diff --git a/transforms/images/polus-apply-flatfield-plugin/src/main.py b/transforms/images/polus-apply-flatfield-plugin/src/main.py index c6090518f..09739cac1 100644 --- a/transforms/images/polus-apply-flatfield-plugin/src/main.py +++ b/transforms/images/polus-apply-flatfield-plugin/src/main.py @@ -2,12 +2,12 @@ import logging import os import typing +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from pathlib import Path from bfio import BioReader, BioWriter from filepattern import FilePattern import numpy as np -from preadator import ProcessManager FILE_EXT = os.environ.get("POLUS_EXT", None) FILE_EXT = FILE_EXT if FILE_EXT is not None else ".ome.tif" @@ -21,61 +21,57 @@ def unshade_images( flatfield: np.ndarray, darkfield: np.ndarray = None, ): - with ProcessManager.process(): - - # Initialize the output - X = flatfield.shape[1] - Y = flatfield.shape[0] - N = len(flist) - - img_stack = np.zeros((N, Y, X), dtype=np.float32) - - # Load the images - def load_and_store(fname, ind): - with ProcessManager.thread() as active_threads: - with BioReader(fname["file"], 
max_workers=active_threads.count) as br: - img_stack[ind, ...] = np.squeeze(br[:, :, 0, 0, 0]) - - for ind, fname in enumerate(flist): - ProcessManager.submit_thread(load_and_store, fname, ind) - - ProcessManager.join_threads(5) - - # Apply flatfield correction - if darkfield is not None: - img_stack -= darkfield - - img_stack /= flatfield - - # Save outputs - def save_output(fname, ind): - with ProcessManager.thread() as active_threads: - with BioReader(fname["file"], max_workers=active_threads.count) as br: - - # replace the file name extension if needed - inp_image = fname["file"] - extension = "".join( - [ - suffix - for suffix in inp_image.suffixes[-2:] - if len(suffix) < 6 - ] - ) - out_path = out_dir.joinpath( - inp_image.name.replace(extension, FILE_EXT) - ) - - with BioWriter( - out_path, - metadata=br.metadata, - max_workers=active_threads.count, - ) as bw: - bw[:] = img_stack[ind].astype(bw.dtype) - - for ind, fname in enumerate(flist): - ProcessManager.submit_thread(save_output, fname, ind) - - ProcessManager.join_threads(5) + max_workers = 5 + # Initialize the output + X = flatfield.shape[1] + Y = flatfield.shape[0] + N = len(flist) + + img_stack = np.zeros((N, Y, X), dtype=np.float32) + + # Load the images + def load_and_store(fname, ind): + with BioReader(fname["file"], max_workers=max_workers) as br: + img_stack[ind, ...] 
= np.squeeze(br[:, :, 0, 0, 0]) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(load_and_store, fname, ind) + for ind, fname in enumerate(flist) + ] + for f in futures: + f.result() + + # Apply flatfield correction + if darkfield is not None: + img_stack -= darkfield + + img_stack /= flatfield + + # Save outputs + def save_output(fname, ind): + with BioReader(fname["file"], max_workers=max_workers) as br: + inp_image = fname["file"] + extension = "".join( + [suffix for suffix in inp_image.suffixes[-2:] if len(suffix) < 6] + ) + out_path = out_dir.joinpath( + inp_image.name.replace(extension, FILE_EXT) + ) + with BioWriter( + out_path, + metadata=br.metadata, + max_workers=max_workers, + ) as bw: + bw[:] = img_stack[ind].astype(bw.dtype) + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(save_output, fname, ind) + for ind, fname in enumerate(flist) + ] + for f in futures: + f.result() def unshade_batch( @@ -97,17 +93,19 @@ def unshade_batch( if batches[-1] != len(files): batches.append(len(files)) - for i_start, i_end in zip(batches[:-1], batches[1:]): - - ProcessManager.submit_process( - unshade_images, - files[i_start:i_end], - out_dir, - brightfield_image, - darkfield_image, - ) - - ProcessManager.join_processes() + with ProcessPoolExecutor() as executor: + futures = [ + executor.submit( + unshade_images, + files[i_start:i_end], + out_dir, + brightfield_image, + darkfield_image, + ) + for i_start, i_end in zip(batches[:-1], batches[1:]) + ] + for f in futures: + f.result() def main( @@ -134,9 +132,6 @@ def main( group_by = [v for v in fp.variables if v not in ff_files.variables] GROUPED = group_by + ["file"] - ProcessManager.init_processes("main", "unshade") - logger.info(f"Running with {ProcessManager.num_processes()} processes.") - for files in fp(group_by=group_by): flat_path = ff_files.get_matching( @@ -168,8 +163,6 @@ def main( unshade_batch(files, outDir, 
flat_path, dark_path, photo_path) - # ProcessManager.join_processes() - if __name__ == "__main__": """Initialize the logger""" diff --git a/transforms/images/polus-apply-flatfield-plugin/src/requirements.txt b/transforms/images/polus-apply-flatfield-plugin/src/requirements.txt index be4eb88f8..6b24f6745 100644 --- a/transforms/images/polus-apply-flatfield-plugin/src/requirements.txt +++ b/transforms/images/polus-apply-flatfield-plugin/src/requirements.txt @@ -1,3 +1,2 @@ -bfio==2.1.9 -filepattern==1.4.7 -preadator==0.2.0 +bfio>=2.5.0 +filepattern>=2.2.1 diff --git a/transforms/images/polus-autocropping-plugin/src/autocrop_utils/helpers.py b/transforms/images/polus-autocropping-plugin/src/autocrop_utils/helpers.py index 55a2ccd53..a7784b219 100644 --- a/transforms/images/polus-autocropping-plugin/src/autocrop_utils/helpers.py +++ b/transforms/images/polus-autocropping-plugin/src/autocrop_utils/helpers.py @@ -1,4 +1,3 @@ -"""Helpers for tiles, strips, distograms, gradients, and bounding boxes.""" from collections.abc import Generator from pathlib import Path from typing import Optional diff --git a/transforms/images/polus-ftl-label-plugin/pyproject.toml b/transforms/images/polus-ftl-label-plugin/pyproject.toml new file mode 100644 index 000000000..359027511 --- /dev/null +++ b/transforms/images/polus-ftl-label-plugin/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "polus-ftl-label-plugin" +version = "0.3.10" +requires-python = ">=3.13" +dependencies = [ + "numpy>=2.4.3", + "bfio>=2.5.0", +] + +[build-system] +requires = ["setuptools>=41.0.0", "wheel", "setuptools_rust>=0.10.2"] +build-backend = "setuptools.build_meta" diff --git a/transforms/images/polus-ftl-label-plugin/src/main.py b/transforms/images/polus-ftl-label-plugin/src/main.py new file mode 100644 index 000000000..58bff26dd --- /dev/null +++ b/transforms/images/polus-ftl-label-plugin/src/main.py @@ -0,0 +1,180 @@ +import argparse +import logging +import os +from pathlib import Path +from typing import 
List, Tuple + +import os +import numpy +from concurrent.futures import ThreadPoolExecutor +from bfio import BioReader +from bfio import BioWriter + +import ftl +from ftl_rust import PolygonSet + +POLUS_LOG = getattr(logging, os.environ.get('POLUS_LOG', 'INFO')) +POLUS_EXT = os.environ.get('POLUS_EXT', '.ome.tif') # TODO: Figure out how to use this + +# Initialize the logger +logging.basicConfig( + format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', + datefmt='%d-%b-%y %H:%M:%S', +) +logger = logging.getLogger("main") +logger.setLevel(POLUS_LOG) + + +def get_output_name(filename: str) -> str: + name = filename.split('.ome')[0] + return f'{name}{POLUS_EXT}' + + +def filter_by_size(file_paths: List[Path], size_threshold: int) -> Tuple[List[Path], List[Path]]: + """ Partitions the input files by the memory-footprint for the images. + + Args: + file_paths: The list of files to partition. + size_threshold: The memory-size (in MB) to use as a threshold. + + Returns: + A 2-tuple of lists of paths. + The first list contains small images and the second list contains large images. + """ + small_files, large_files = list(), list() + threshold: int = size_threshold * 1024 * 1024 + + for file_path in file_paths: + with BioReader(file_path) as reader: + num_pixels = numpy.prod(reader.shape) + dtype = reader.dtype + + if dtype in (numpy.uint8, bool): + pixel_bytes = 8 + elif dtype == numpy.uint16: + pixel_bytes = 16 + elif dtype == numpy.uint32: + pixel_bytes = 32 + else: + pixel_bytes = 64 + + image_size = num_pixels * (pixel_bytes / 8) # Convert bits to bytes + (small_files if image_size <= threshold else large_files).append(file_path) + + return small_files, large_files + + +def label_cython(input_path: Path, output_path: Path, connectivity: int): + """Label the input image and writes labels back out. + + Args: + input_path: Path to input image. + output_path: Path for output image. + connectivity: Connectivity kind. 
+ """ + max_workers = max(1, (os.cpu_count() or 4) // 2) + with BioReader(input_path, max_workers=max_workers) as reader: + with BioWriter( + output_path, + max_workers=max_workers, + metadata=reader.metadata, + ) as writer: + # Load an image and convert to binary + image = numpy.squeeze(reader[..., 0, 0]) + + if not numpy.any(image): + writer.dtype = numpy.uint8 + writer[:] = numpy.zeros_like(image, dtype=numpy.uint8) + return True + + image = image > 0 + if connectivity > image.ndim: + logger.warning( + "%s: Connectivity is not less than or equal to the number of image dimensions, " + "skipping this image. connectivity=%s, ndim=%s", + input_path.name, + connectivity, + image.ndim, + ) + return False + + # Run the labeling algorithm + labels = ftl.label_nd(image, connectivity) + + # Save the image + writer.dtype = labels.dtype + writer[:] = labels + return True + + +if __name__ == "__main__": + # Setup the argument parsing + logger.info("Parsing arguments...") + parser = argparse.ArgumentParser( + prog='main', + description='Label objects in a 2d or 3d binary image.', + ) + + parser.add_argument( + '--inpDir', dest='inpDir', type=str, required=True, + help='Input image collection to be processed by this plugin', + ) + + parser.add_argument( + '--connectivity', dest='connectivity', type=str, required=True, + help='City block connectivity, must be less than or equal to the number of dimensions', + ) + + parser.add_argument( + '--outDir', dest='outDir', type=str, required=True, + help='Output collection', + ) + + # Parse the arguments + args = parser.parse_args() + + _connectivity = int(args.connectivity) + logger.info(f'connectivity = {_connectivity}') + + _input_dir = Path(args.inpDir).resolve() + assert _input_dir.exists(), f'{_input_dir } does not exist.' 
+ if _input_dir.joinpath('images').is_dir(): + _input_dir = _input_dir.joinpath('images') + logger.info(f'inpDir = {_input_dir}') + + _output_dir = Path(args.outDir).resolve() + assert _output_dir.exists(), f'{_output_dir } does not exist.' + logger.info(f'outDir = {_output_dir}') + + # Get all file names in inpDir image collection + _files = list( + filter( + lambda _file: _file.is_file() and _file.name.endswith(".ome.tif"), + _input_dir.iterdir(), + ) + ) + _small_files, _large_files = filter_by_size(_files, 500) + + logger.info("processing %s images in total...", len(_files)) + logger.info("processing %s small images with cython...", len(_small_files)) + logger.info("processing %s large images with rust", len(_large_files)) + + if _small_files: + max_workers = max(1, (os.cpu_count() or 4) // 2) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit( + label_cython, + _infile, + _output_dir.joinpath(get_output_name(_infile.name)), + _connectivity, + ) + for _infile in _small_files + ] + for f in futures: + f.result() + + if _large_files: + for _infile in _large_files: + _outfile = _output_dir.joinpath(get_output_name(_infile.name)) + PolygonSet(_connectivity).read_from(_infile).write_to(_outfile) diff --git a/transforms/images/polus-ftl-label-plugin/src/requirements.txt b/transforms/images/polus-ftl-label-plugin/src/requirements.txt new file mode 100644 index 000000000..ce5685b04 --- /dev/null +++ b/transforms/images/polus-ftl-label-plugin/src/requirements.txt @@ -0,0 +1,5 @@ +Cython>=3.0.0 +numpy>=2.4.3 +bfio[all]>=2.5.0 +filepattern>=2.2.1 + diff --git a/transforms/images/polus-image-registration-plugin/pyproject.toml b/transforms/images/polus-image-registration-plugin/pyproject.toml new file mode 100644 index 000000000..3808ecc69 --- /dev/null +++ b/transforms/images/polus-image-registration-plugin/pyproject.toml @@ -0,0 +1,17 @@ +[project] +name = "polus-image-registration-plugin" +version = "0.3.5" +description = 
"This plugin registers an image collection" +requires-python = ">=3.13" +dependencies = [ + "numpy>=2.4.3", + "opencv-python-headless>=4.7.0", + "filepattern>=2.2.1", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src"] diff --git a/transforms/images/polus-image-registration-plugin/src/__init__.py b/transforms/images/polus-image-registration-plugin/src/__init__.py new file mode 100644 index 000000000..c197bd65d --- /dev/null +++ b/transforms/images/polus-image-registration-plugin/src/__init__.py @@ -0,0 +1 @@ +"""Image registration plugin.""" diff --git a/transforms/images/polus-intensity-projection-plugin/pyproject.toml b/transforms/images/polus-intensity-projection-plugin/pyproject.toml new file mode 100644 index 000000000..6a0ae9e5f --- /dev/null +++ b/transforms/images/polus-intensity-projection-plugin/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "polus-intensity-projection-plugin" +version = "0.1.9" +description = "Calculate volumetric intensity projections" +requires-python = ">=3.13" +dependencies = [ + "bfio>=2.5.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src"] diff --git a/transforms/images/polus-intensity-projection-plugin/src/__init__.py b/transforms/images/polus-intensity-projection-plugin/src/__init__.py new file mode 100644 index 000000000..9739f9602 --- /dev/null +++ b/transforms/images/polus-intensity-projection-plugin/src/__init__.py @@ -0,0 +1 @@ +"""Intensity projection plugin.""" diff --git a/transforms/images/polus-intensity-projection-plugin/src/main.py b/transforms/images/polus-intensity-projection-plugin/src/main.py index 959633148..88b01b5ab 100644 --- a/transforms/images/polus-intensity-projection-plugin/src/main.py +++ b/transforms/images/polus-intensity-projection-plugin/src/main.py @@ -1,8 +1,13 @@ -import argparse, logging, time, sys, os, traceback -from 
bfio.bfio import BioReader, BioWriter +import argparse +import logging +import time +import sys +import os +import traceback +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from pathlib import Path import numpy as np -from preadator import ProcessManager +from bfio.bfio import BioReader, BioWriter # x,y size of the 3d image chunk to be loaded into memory tile_size = 1024 @@ -10,125 +15,129 @@ # depth of the 3d image chunk tile_size_z = 128 -def max_min_projection(br, bw, x_range, y_range, **kwargs): - """ Calculate the max or min intensity - projection of a section of the input image. +def max_min_projection(br, bw, x_range, y_range, max_workers, **kwargs): + """Calculate the max or min intensity projection of a section of the input image. Args: br (BioReader object): input file object bw (BioWriter object): output file object x_range (tuple): x-range of the img to be processed y_range (tuple): y-range of the img to be processed - - Returns: - image array : Max IP of the input volume + max_workers: number of workers for reader/writer """ - with ProcessManager.thread(): - br.max_workers = ProcessManager._active_threads - bw.max_workers = ProcessManager._active_threads + br.max_workers = max_workers + bw.max_workers = max_workers - # set projection method - if not 'method' in kwargs: - method = np.max - else: - method = kwargs['method'] + # set projection method + method = kwargs.get("method", np.max) - # x,y range of the volume - x, x_max = x_range - y, y_max = y_range + # x,y range of the volume + x, x_max = x_range + y, y_max = y_range - # iterate over depth - for ind, z in enumerate(range(0,br.Z,tile_size_z)): - z_max = min([br.Z,z+tile_size_z]) - if ind == 0: - out_image = method(br[y:y_max,x:x_max,z:z_max,0,0], axis=2) - else: - out_image = np.dstack((out_image, method(br[y:y_max,x:x_max,z:z_max,0,0], axis=2))) + # iterate over depth + for ind, z in enumerate(range(0, br.Z, tile_size_z)): + z_max = min([br.Z, z + tile_size_z]) + if ind 
== 0: + out_image = method(br[y:y_max, x:x_max, z:z_max, 0, 0], axis=2) + else: + out_image = np.dstack( + (out_image, method(br[y:y_max, x:x_max, z:z_max, 0, 0], axis=2)) + ) - # output image - bw[y:y_max,x:x_max,0:1,0,0] = method(out_image, axis=2) + # output image + bw[y:y_max, x:x_max, 0:1, 0, 0] = method(out_image, axis=2) -def mean_projection(br, bw, x_range, y_range, **kwargs): - """ Calculate the mean intensity projection +def mean_projection(br, bw, x_range, y_range, max_workers, **kwargs): + """Calculate the mean intensity projection. Args: br (BioReader object): input file object bw (BioWriter object): output file object x_range (tuple): x-range of the img to be processed y_range (tuple): y-range of the img to be processed - - Returns: - image array : Mean IP of the input volume + max_workers: number of workers for reader/writer """ - with ProcessManager.thread(): - br.max_workers = ProcessManager._active_threads - bw.max_workers = ProcessManager._active_threads + br.max_workers = max_workers + bw.max_workers = max_workers - # x,y range of the volume - x, x_max = x_range - y, y_max = y_range + # x,y range of the volume + x, x_max = x_range + y, y_max = y_range - # iterate over depth - out_image = np.zeros((y_max-y,x_max-x),dtype=np.float64) - for ind, z in enumerate(range(0,br.Z,tile_size_z)): - z_max = min([br.Z,z+tile_size_z]) + # iterate over depth + out_image = np.zeros((y_max - y, x_max - x), dtype=np.float64) + for ind, z in enumerate(range(0, br.Z, tile_size_z)): + z_max = min([br.Z, z + tile_size_z]) + out_image += np.sum( + br[y:y_max, x:x_max, z:z_max, ...].astype(np.float64), axis=2 + ).squeeze() - out_image += np.sum(br[y:y_max,x:x_max,z:z_max,...].astype(np.float64),axis=2).squeeze() - - # output image - out_image /= br.Z - bw[y:y_max,x:x_max,0:1,0,0] = out_image.astype(br.dtype) + # output image + out_image /= br.Z + bw[y:y_max, x:x_max, 0:1, 0, 0] = out_image.astype(br.dtype) def process_image(input_img_path, output_img_path, projection, 
method): - - # Grab a free process - with ProcessManager.process(): - - # initalize biowriter and bioreader - with BioReader(input_img_path, max_workers=ProcessManager._active_threads) as br, \ - BioWriter(output_img_path, metadata=br.metadata, max_workers=ProcessManager._active_threads) as bw: - - # output image is 2d - bw.Z = 1 - - # iterate along the x,y direction - for x in range(0,br.X,tile_size): - x_max = min([br.X,x+tile_size]) - - for y in range(0,br.Y,tile_size): - y_max = min([br.Y,y+tile_size]) - - ProcessManager.submit_thread(projection,br,bw,(x, x_max),(y, y_max),method=method) - - ProcessManager.join_threads() + max_workers = max(1, (os.cpu_count() or 4) // 2) + + with BioReader( + input_img_path, max_workers=max_workers + ) as br, BioWriter( + output_img_path, metadata=br.metadata, max_workers=max_workers + ) as bw: + # output image is 2d + bw.Z = 1 + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [] + for x in range(0, br.X, tile_size): + x_max = min([br.X, x + tile_size]) + for y in range(0, br.Y, tile_size): + y_max = min([br.Y, y + tile_size]) + if method is not None: + futures.append( + executor.submit( + projection, br, bw, (x, x_max), (y, y_max), max_workers, method=method + ) + ) + else: + futures.append( + executor.submit( + projection, br, bw, (x, x_max), (y, y_max), max_workers + ) + ) + for f in futures: + f.result() def main(inpDir, outDir, projection, method): - # images in the input directory inpDir_files = os.listdir(inpDir) - inpDir_files = [filename for filename in inpDir_files if filename.endswith('.ome.tif')] + inpDir_files = [ + filename for filename in inpDir_files if filename.endswith(".ome.tif") + ] - # Surround with try/finally for proper error catching try: - for image_name in inpDir_files: - - input_img_path = os.path.join(inpDir, image_name) - output_img_path = os.path.join(outDir, image_name) - - ProcessManager.submit_process(process_image, input_img_path, output_img_path, projection, method) 
- - ProcessManager.join_processes() - + with ProcessPoolExecutor() as executor: + futures = [ + executor.submit( + process_image, + os.path.join(inpDir, image_name), + os.path.join(outDir, image_name), + projection, + method, + ) + for image_name in inpDir_files + ] + for f in futures: + f.result() except Exception: traceback.print_exc() - finally: - # Exit the program - logger.info('Exiting the workflow..') + logger.info("Exiting the workflow..") sys.exit() if __name__=="__main__": @@ -170,11 +179,9 @@ def main(inpDir, outDir, projection, method): elif projectionType == 'min': projection = max_min_projection method = np.min - elif projectionType == 'mean': + elif projectionType == "mean": projection = mean_projection method = None - - ProcessManager.init_processes('main','intensity') main(inpDir, outDir, projection, method) diff --git a/transforms/images/polus-intensity-projection-plugin/src/requirements.txt b/transforms/images/polus-intensity-projection-plugin/src/requirements.txt index 28c54f578..75d9c3a2f 100644 --- a/transforms/images/polus-intensity-projection-plugin/src/requirements.txt +++ b/transforms/images/polus-intensity-projection-plugin/src/requirements.txt @@ -1,2 +1 @@ -bfio==2.0.5 -preadator==0.2.0 \ No newline at end of file +bfio>=2.5.0 \ No newline at end of file diff --git a/transforms/images/polus-stack-z-slice-plugin/pyproject.toml b/transforms/images/polus-stack-z-slice-plugin/pyproject.toml new file mode 100644 index 000000000..58cfc0b62 --- /dev/null +++ b/transforms/images/polus-stack-z-slice-plugin/pyproject.toml @@ -0,0 +1,18 @@ +[project] +name = "polus-stack-z-slice-plugin" +version = "1.2.4" +requires-python = ">=3.13" +dependencies = [ + "bfio>=2.5.0", + "filepattern>=2.2.1", +] + +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.package-dir] +"" = "src" + +[tool.setuptools] +py-modules = ["main"] diff --git 
a/transforms/images/polus-stack-z-slice-plugin/src/main.py b/transforms/images/polus-stack-z-slice-plugin/src/main.py index 6b18b575c..fbfc60194 100644 --- a/transforms/images/polus-stack-z-slice-plugin/src/main.py +++ b/transforms/images/polus-stack-z-slice-plugin/src/main.py @@ -1,10 +1,19 @@ -import argparse, logging, math, filepattern, time, queue +import argparse +import logging +import math +import filepattern +import time +import queue +import os +from concurrent.futures import ProcessPoolExecutor +import pathlib from bfio import BioReader, BioWriter -import pathlib -from preadator import ProcessManager -logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', - datefmt='%d-%b-%y %H:%M:%S') +logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", +) +logger = logging.getLogger("main") # length/width of the chunk each _merge_layers thread processes at once chunk_size = 8192 @@ -17,44 +26,46 @@ 'nm': 1, 'Å': 10**-1} -def _merge_layers(input_files,output_path): - - with ProcessManager.process(output_path.name): - - # Get the number of layers to stack - z_size = 0 - for f in input_files: - with BioReader(f['file']) as br: - z_size += br.z - - # Get some basic info about the files to stack - with BioReader(input_files[0]['file']) as br: - - # Get the physical z-distance if available, set to physical x if not - ps_z = br.ps_z - - # If the z-distances are undefined, average the x and y together - if None in ps_z: - # Get the size and units for x and y - x_val,x_units = br.ps_x - y_val,y_units = br.ps_y - - # Convert x and y values to the same units and average - z_val = (x_val*UNITS[x_units] + y_val*UNITS[y_units])/2 - - # Set z units to the smaller of the units between x and y - z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units - - # Convert z to the proper unit scale - z_val /= UNITS[z_units] - ps_z = (z_val,z_units) - ProcessManager.log('Could 
not find physical z-size. Using the average of x & y {}.'.format(ps_z)) - - # Hold a reference to the metadata once the file gets closed - metadata = br.metadata - - # Create the output file within a context manager - with BioWriter(output_path,metadata=metadata,max_workers=ProcessManager._active_threads) as bw: +def _merge_layers(input_files, output_path): + max_workers = max(1, (os.cpu_count() or 4) // 2) + + # Get the number of layers to stack + z_size = 0 + for f in input_files: + with BioReader(f["file"]) as br: + z_size += br.z + + # Get some basic info about the files to stack + with BioReader(input_files[0]["file"]) as br: + # Get the physical z-distance if available, set to physical x if not + ps_z = br.ps_z + + # If the z-distances are undefined, average the x and y together + if None in ps_z: + # Get the size and units for x and y + x_val, x_units = br.ps_x + y_val, y_units = br.ps_y + + # Convert x and y values to the same units and average + z_val = (x_val * UNITS[x_units] + y_val * UNITS[y_units]) / 2 + + # Set z units to the smaller of the units between x and y + z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units + + # Convert z to the proper unit scale + z_val /= UNITS[z_units] + ps_z = (z_val, z_units) + logger.info( + "Could not find physical z-size. 
Using the average of x & y {}.".format( + ps_z + ) + ) + + # Hold a reference to the metadata once the file gets closed + metadata = br.metadata + + # Create the output file within a context manager + with BioWriter(output_path, metadata=metadata, max_workers=max_workers) as bw: # Adjust the dimensions before writing bw.z = z_size @@ -67,7 +78,7 @@ def _merge_layers(input_files,output_path): for file in input_files: # Open an image - with BioReader(file['file'],max_workers=ProcessManager._active_threads) as br: + with BioReader(file["file"], max_workers=max_workers) as br: # Open z-layers one at a time for z in range(br.z): @@ -84,9 +95,10 @@ def _merge_layers(input_files,output_path): zi += 1 - # update the BioWriter in case the ProcessManager found more threads - bw.max_workers = ProcessManager._active_threads - + # update the BioWriter max_workers + bw.max_workers = max_workers + + def main(input_dir: pathlib.Path, file_pattern: str, output_dir: pathlib.Path @@ -95,14 +107,14 @@ def main(input_dir: pathlib.Path, # create the filepattern object fp = filepattern.FilePattern(input_dir,file_pattern) - for files in fp(group_by='z'): - - output_name = fp.output_name(files) - output_file = output_dir.joinpath(output_name) - - ProcessManager.submit_process(_merge_layers,files,output_file) - - ProcessManager.join_processes() + with ProcessPoolExecutor() as executor: + futures = [] + for files in fp(group_by="z"): + output_name = fp.output_name(files) + output_file = output_dir.joinpath(output_name) + futures.append(executor.submit(_merge_layers, files, output_file)) + for f in futures: + f.result() if __name__ == "__main__": # Initialize the main thread logger @@ -126,12 +138,9 @@ def main(input_dir: pathlib.Path, input_dir = input_dir.joinpath("images") output_dir = pathlib.Path(args.output_dir) file_pattern = args.file_pattern - logger.info(f'input_dir = {input_dir}') - logger.info(f'output_dir = {output_dir}') - logger.info(f'file_pattern = {file_pattern}') - 
logger.info(f'max_threads: {ProcessManager.num_processes()}') - - ProcessManager.init_processes('main','stack') + logger.info(f"input_dir = {input_dir}") + logger.info(f"output_dir = {output_dir}") + logger.info(f"file_pattern = {file_pattern}") main(input_dir, file_pattern, diff --git a/transforms/images/polus-stack-z-slice-plugin/src/requirements.txt b/transforms/images/polus-stack-z-slice-plugin/src/requirements.txt index 2a7cb144c..634c4b837 100644 --- a/transforms/images/polus-stack-z-slice-plugin/src/requirements.txt +++ b/transforms/images/polus-stack-z-slice-plugin/src/requirements.txt @@ -1,2 +1 @@ -filepattern==1.4.7 -preadator==0.2.0 +filepattern>=2.2.1 diff --git a/transforms/images/remove-border-objects-plugin/bumpversion.cfg b/transforms/images/remove-border-objects-plugin/.bumpversion.cfg similarity index 75% rename from transforms/images/remove-border-objects-plugin/bumpversion.cfg rename to transforms/images/remove-border-objects-plugin/.bumpversion.cfg index b4ebbaa01..1176d3e98 100644 --- a/transforms/images/remove-border-objects-plugin/bumpversion.cfg +++ b/transforms/images/remove-border-objects-plugin/.bumpversion.cfg @@ -1,8 +1,10 @@ [bumpversion] -current_version = 0.1.1 +current_version = 0.1.2 commit = False tag = False +[bumpversion:file:VERSION] + [bumpversion:file:plugin.json] [bumpversion:file:README.md] diff --git a/transforms/images/remove-border-objects-plugin/Dockerfile b/transforms/images/remove-border-objects-plugin/Dockerfile index 477454ea8..611d838c1 100644 --- a/transforms/images/remove-border-objects-plugin/Dockerfile +++ b/transforms/images/remove-border-objects-plugin/Dockerfile @@ -1,8 +1,15 @@ -FROM labshare/polus-bfio-util:2.1.9-tensorflow -ENV EXEC_DIR="/opt/executables" -RUN mkdir -p ${EXEC_DIR} -COPY VERSION ${EXEC_DIR} -COPY src ${EXEC_DIR}/ -RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir && \ - pip3 install "bfio[all]" -ENTRYPOINT ["python3", "main.py"] \ No newline at end of file +# Build from 
repo root (monorepo) or from this tool directory — both work. +# (Previously used a TensorFlow base image; this matches other tools — rebuild if TF is required.) +FROM polusai/bfio:2.5.0 +ENV EXEC_DIR="/opt/executables" POLUS_IMG_EXT=".ome.tif" POLUS_TAB_EXT=".csv" POLUS_LOG="INFO" +WORKDIR ${EXEC_DIR} +ENV TOOL_DIR="transforms/images/remove-border-objects-plugin" +RUN mkdir -p image-tools +COPY . ${EXEC_DIR}/image-tools +RUN pip3 install -U pip setuptools wheel \ + && python3 -c 'import sys; assert sys.version_info>=(3,11)' \ + && R="${EXEC_DIR}/image-tools" && M="$R/$TOOL_DIR" \ + && if [ -f "$M/pyproject.toml" ]; then pip3 install --no-cache-dir "$M"; \ + else pip3 install --no-cache-dir "$R"; fi +ENTRYPOINT ["python3", "-m", "main"] +CMD ["--help"] diff --git a/transforms/images/remove-border-objects-plugin/README.md b/transforms/images/remove-border-objects-plugin/README.md index 97e4d88fe..ab0b19064 100644 --- a/transforms/images/remove-border-objects-plugin/README.md +++ b/transforms/images/remove-border-objects-plugin/README.md @@ -1,5 +1,6 @@ # Remove border objects +**Version:** v0.1.2 Remove border objects plugin clear objects which touch image borders and squentially relabelling of image objects @@ -16,7 +17,7 @@ At the moment this plugin supports label images with two dimensions only. 
We wil -**a -** Original image contains 67 unique label objects +**a -** Original image contains 67 unique label objects **b -** Image with 16 detected border objects **c -** Removing Border objects and sequential relabelling @@ -41,7 +42,3 @@ This plugin takes two input arguments and | `--inpDir` | Input image directory | Input | collection | | `--pattern` | Filepattern to parse image files | Input | string | | `--outDir` | Output collection | Output | collection | - - - - diff --git a/transforms/images/remove-border-objects-plugin/VERSION b/transforms/images/remove-border-objects-plugin/VERSION index 6da28dde7..d917d3e26 100644 --- a/transforms/images/remove-border-objects-plugin/VERSION +++ b/transforms/images/remove-border-objects-plugin/VERSION @@ -1 +1 @@ -0.1.1 \ No newline at end of file +0.1.2 diff --git a/transforms/images/remove-border-objects-plugin/build-docker.sh b/transforms/images/remove-border-objects-plugin/build-docker.sh index 00f49fd0a..70ca7937e 100755 --- a/transforms/images/remove-border-objects-plugin/build-docker.sh +++ b/transforms/images/remove-border-objects-plugin/build-docker.sh @@ -1,4 +1,4 @@ #!/bin/bash version=$(=2.5.0", + "filepattern>=2.2.1", + "numpy>=1.26.4", + "scikit-image>=0.25", +] + +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.package-dir] +"" = "src" + +[tool.setuptools] +py-modules = ["main", "functions"] diff --git a/transforms/images/remove-border-objects-plugin/run-plugin.sh b/transforms/images/remove-border-objects-plugin/run-plugin.sh index 5273d0ac9..5e92700d1 100755 --- a/transforms/images/remove-border-objects-plugin/run-plugin.sh +++ b/transforms/images/remove-border-objects-plugin/run-plugin.sh @@ -19,5 +19,4 @@ docker run --mount type=bind,source=${datapath},target=/data/ \ polusai/remove-border-objects-plugin:${version} \ --inpDir ${inpDir} \ --pattern ${pattern} \ - --outDir ${outDir} - \ No newline at end of file + --outDir ${outDir} 
diff --git a/transforms/images/remove-border-objects-plugin/src/__init__.py b/transforms/images/remove-border-objects-plugin/src/__init__.py new file mode 100644 index 000000000..10cba6eea --- /dev/null +++ b/transforms/images/remove-border-objects-plugin/src/__init__.py @@ -0,0 +1 @@ +"""Remove border objects plugin package.""" diff --git a/transforms/images/remove-border-objects-plugin/src/functions.py b/transforms/images/remove-border-objects-plugin/src/functions.py index 578f6050b..254d7456c 100644 --- a/transforms/images/remove-border-objects-plugin/src/functions.py +++ b/transforms/images/remove-border-objects-plugin/src/functions.py @@ -1,37 +1,43 @@ -import os -from bfio import BioReader, BioWriter +"""Core image operations for removing border-touching labels.""" from pathlib import Path +from typing import Any + import numpy as np +from bfio import BioReader +from bfio import BioWriter from skimage.segmentation import relabel_sequential -class Discard_borderobjects: - +class DiscardBorderObjects: """Discard objects which touches image borders and relabelling of objects. + Args: inpDir (Path) : Path to label image directory outDir (Path) : Path to relabel image directory filename (str): Name of a label image Returns: label_image : ndarray of dtype int - label_image, with discarded objects touching border - """ - def __init__(self, inpDir, outDir, filename): - self.inpDir = inpDir - self.outDir= outDir + label_image, with discarded objects touching border. 
+ """ + + def __init__(self, inp_dir: Path, out_dir: Path, filename: str) -> None: + """Load a label image and keep path/metadata for output.""" + self.inp_dir = inp_dir + self.out_dir = out_dir self.filename = filename - self.imagepath = os.path.join(self.inpDir, self.filename) + self.imagepath = str(self.inp_dir / self.filename) self.br_image = BioReader(self.imagepath) self.label_img = self.br_image.read().squeeze() - def discard_borderobjects(self): - """ This functions identifies which label pixels touches image borders and - setting the values of those label pixels to background pixels values which is 0 + def discard_borderobjects(self) -> np.ndarray[Any, Any]: + """Set border-touching labels to background (0). + + Identify labels that touch the image borders and clear those labels. """ borderobj = list(self.label_img[0, :]) borderobj.extend(self.label_img[:, 0]) - borderobj.extend(self.label_img[- 1, :]) - borderobj.extend(self.label_img[:, - 1]) + borderobj.extend(self.label_img[-1, :]) + borderobj.extend(self.label_img[:, -1]) borderobj = np.unique(borderobj).tolist() for obj in borderobj: @@ -39,22 +45,23 @@ def discard_borderobjects(self): return self.label_img - def relabel_sequential(self): - """ Sequential relabelling of objects in a label image - """ - relabel_img, _, inverse_map = relabel_sequential(self.label_img) + def relabel_sequential(self) -> tuple[np.ndarray[Any, Any], np.ndarray[Any, Any]]: + """Sequential relabelling of objects in a label image.""" + relabel_img, _, inverse_map = relabel_sequential(self.label_img) return relabel_img, inverse_map - - def save_relabel_image(self, x): - """ Writing images with relabelled and cleared border touching objects - """ - with BioWriter(file_path = Path(self.outDir, self.filename), - backend='python', - metadata = self.br_image.metadata, - X=self.label_img.shape[0], - Y=self.label_img.shape[0], - dtype=self.label_img.dtype) as bw: + def save_relabel_image(self, x: np.ndarray[Any, Any]) -> None: + 
"""Writing images with relabelled and cleared border touching objects.""" + with BioWriter( + file_path=self.out_dir / self.filename, + backend="python", + metadata=self.br_image.metadata, + X=self.label_img.shape[0], + Y=self.label_img.shape[0], + dtype=self.label_img.dtype, + ) as bw: bw[:] = x - bw.close() - return \ No newline at end of file + + +# Backward-compatible alias for older imports/tests. +Discard_borderobjects = DiscardBorderObjects diff --git a/transforms/images/remove-border-objects-plugin/src/main.py b/transforms/images/remove-border-objects-plugin/src/main.py index e713ade19..1a86adf3d 100644 --- a/transforms/images/remove-border-objects-plugin/src/main.py +++ b/transforms/images/remove-border-objects-plugin/src/main.py @@ -1,89 +1,105 @@ -import argparse, logging, os, time, filepattern +"""CLI entrypoint for discard-border-objects plugin.""" +import argparse +import logging +import os +import time from pathlib import Path -from functions import * +import filepattern +from functions import DiscardBorderObjects -#Import environment variables -POLUS_LOG = getattr(logging,os.environ.get('POLUS_LOG','INFO')) -POLUS_EXT = os.environ.get('POLUS_EXT','.ome.tif') +# Import environment variables +POLUS_LOG = getattr(logging, os.environ.get("POLUS_LOG", "INFO")) +POLUS_EXT = os.environ.get("POLUS_EXT", ".ome.tif") # Initialize the logger -logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', - datefmt='%d-%b-%y %H:%M:%S') +logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", +) logger = logging.getLogger("main") logger.setLevel(POLUS_LOG) -def main(inpDir:Path, - pattern:str, - outDir:Path, - ): - starttime= time.time() - if pattern is None: - logger.info( - "No filepattern was provided so filepattern uses all input files" - ) +def main( + inp_dir: Path, + pattern: str, + out_dir: Path, +) -> None: + """Run border-object removal and relabeling for each 
image in input directory.""" + starttime = time.time() + if pattern is None: + logger.info("No filepattern was provided so filepattern uses all input files") - assert inpDir.exists(), logger.info("Input directory does not exist") - count=0 - fp = filepattern.FilePattern(inpDir,pattern) - imagelist = len([f for f in fp]) + if not inp_dir.exists(): + msg = f"Input directory does not exist: {inp_dir}" + logger.error(msg) + raise FileNotFoundError(msg) + count = 0 + fp = filepattern.FilePattern(inp_dir, pattern) + imagelist = len(list(fp)) - for f in fp(): - count += 1 - file = f[0]['file'].name - logger.info(f'Label image: {file}') - db = Discard_borderobjects(inpDir, outDir, file) - db.discard_borderobjects() - relabel_img, _ = db.relabel_sequential() - db.save_relabel_image(relabel_img) - logger.info(f'Saving {count}/{imagelist} Relabelled image with discarded objects: {file}') - logger.info('Finished all processes') - endtime = (time.time() - starttime)/60 - logger.info(f'Total time taken to process all images: {endtime}') + for f in fp(): + count += 1 + file = f[0]["file"].name + logger.info(f"Label image: {file}") + db = DiscardBorderObjects(inp_dir, out_dir, file) + db.discard_borderobjects() + relabel_img, _ = db.relabel_sequential() + db.save_relabel_image(relabel_img) + logger.info( + "Saving %s/%s Relabelled image with discarded objects: %s", + count, + imagelist, + file, + ) + logger.info("Finished all processes") + endtime = (time.time() - starttime) / 60 + logger.info(f"Total time taken to process all images: {endtime}") # ''' Argument parsing ''' logger.info("Parsing arguments...") -parser = argparse.ArgumentParser(prog='main', description='Discard Border Objects Plugin') +parser = argparse.ArgumentParser( + prog="main", + description="Discard Border Objects Plugin", +) # # Input arguments parser.add_argument( - "--inpDir", - dest="inpDir", - type=str, - help="Input image collection to be processed by this plugin", - required=True - ) + "--inpDir", + 
dest="inp_dir", + type=str, + help="Input image collection to be processed by this plugin", + required=True, +) parser.add_argument( - "--pattern", - dest="pattern", - type=str, - default=".+", - help="Filepattern regex used to parse image files", - required=False - ) + "--pattern", + dest="pattern", + type=str, + default=".+", + help="Filepattern regex used to parse image files", + required=False, +) # # Output arguments -parser.add_argument('--outDir', - dest='outDir', +parser.add_argument( + "--outDir", + dest="out_dir", type=str, - help='Output directory', - required=True - ) + help="Output directory", + required=True, +) # # Parse the arguments args = parser.parse_args() -inpDir = Path(args.inpDir) +inp_dir = Path(args.inp_dir) -if (inpDir.joinpath('images').is_dir()): - inputDir = inpDir.joinpath('images').absolute() -logger.info('inpDir = {}'.format(inpDir)) +if inp_dir.joinpath("images").is_dir(): + inp_dir = inp_dir.joinpath("images").absolute() +logger.info(f"inp_dir = {inp_dir}") pattern = args.pattern -logger.info("pattern = {}".format(pattern)) -outDir = Path(args.outDir) -logger.info('outDir = {}'.format(outDir)) +logger.info(f"pattern = {pattern}") +out_dir = Path(args.out_dir) +logger.info(f"out_dir = {out_dir}") -if __name__=="__main__": - main(inpDir=inpDir, - pattern=pattern, - outDir=outDir - ) \ No newline at end of file +if __name__ == "__main__": + main(inp_dir=inp_dir, pattern=pattern, out_dir=out_dir) diff --git a/transforms/images/remove-border-objects-plugin/src/requirements.txt b/transforms/images/remove-border-objects-plugin/src/requirements.txt index 41e2d5ea5..280ee3cfe 100644 --- a/transforms/images/remove-border-objects-plugin/src/requirements.txt +++ b/transforms/images/remove-border-objects-plugin/src/requirements.txt @@ -1,2 +1,2 @@ filepattern==1.4.7 -scikit-image>=0.17.2 \ No newline at end of file +scikit-image>=0.17.2 diff --git a/transforms/images/remove-border-objects-plugin/tests/__init__.py 
b/transforms/images/remove-border-objects-plugin/tests/__init__.py new file mode 100644 index 000000000..e7947bbe8 --- /dev/null +++ b/transforms/images/remove-border-objects-plugin/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for remove-border-objects plugin.""" diff --git a/transforms/images/remove-border-objects-plugin/tests/test_main.py b/transforms/images/remove-border-objects-plugin/tests/test_main.py index 7d844c963..e7c161be9 100644 --- a/transforms/images/remove-border-objects-plugin/tests/test_main.py +++ b/transforms/images/remove-border-objects-plugin/tests/test_main.py @@ -1,69 +1,120 @@ - from pathlib import Path import numpy as np -import os, sys, unittest -from bfio import BioReader +import os +import sys +import tempfile +import shutil +import unittest + +from bfio import BioReader, BioWriter + dirpath = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(os.path.join(dirpath, '../')) -from src.functions import Discard_borderobjects +sys.path.append(os.path.join(dirpath, "../")) +from src.functions import Discard_borderobjects + +inpDir = Path(dirpath).parent.joinpath("images") +outDir = Path(dirpath).parent.joinpath("out") -inpDir = Path(dirpath).parent.joinpath('images') -outDir = Path(dirpath).parent.joinpath('out') +def _make_label_image(size: int = 16) -> np.ndarray: + """Small 2D label array: border=1, interior=2, center=3 (so border label is removed).""" + arr = np.zeros((size, size), dtype=np.uint16) + arr[0, :] = 1 + arr[-1, :] = 1 + arr[:, 0] = 1 + arr[:, -1] = 1 + arr[1:-1, 1:-1] = 2 + arr[size // 2 - 1 : size // 2 + 1, size // 2 - 1 : size // 2 + 1] = 3 + return arr + class Test_Discard_borderobjects(unittest.TestCase): + """Uses a temporary directory and synthetic label images so tests run without checked-in fixture data.""" def setUp(self) -> None: + self._tmpdir = Path(tempfile.mkdtemp(prefix="remove_border_test_")) + out_dir = self._tmpdir / "out" + out_dir.mkdir() + + for i, name in enumerate(["label_01.ome.tif", 
"label_02.ome.tif"], start=1): + label_2d = _make_label_image(16) + path = self._tmpdir / name + arr_5d = label_2d[:, :, np.newaxis, np.newaxis, np.newaxis] + with BioWriter( + path, + backend="python", + X=label_2d.shape[1], + Y=label_2d.shape[0], + Z=1, + C=1, + T=1, + dtype=label_2d.dtype, + ) as bw: + bw[:] = arr_5d - self.inpDir = inpDir - self.outDir=outDir - self.flist = os.listdir(self.inpDir) + self.inpDir = self._tmpdir + self.outDir = out_dir + self.flist = sorted( + f for f in os.listdir(self.inpDir) if f.endswith(".ome.tif") + ) + + def tearDown(self) -> None: + if hasattr(self, "_tmpdir") and self._tmpdir.is_dir(): + shutil.rmtree(self._tmpdir, ignore_errors=True) def test_discard_borderobjects(self): - for f in self.flist: - if f.endswith('.ome.tif'): - br = BioReader(Path(self.inpDir, f)) - image = br.read().squeeze() - dc = Discard_borderobjects(self.inpDir, self.outDir, f) - dc_image = dc.discard_borderobjects() - self.assertTrue(np.unique(image) != np.unique(dc_image)) - self.assertFalse(len(np.unique(image)) < len(np.unique(dc_image))) - - def boundary_labels(x:np.ndarray): - borderobj = list(x[0, :]) - borderobj.extend(x[:, 0]) - borderobj.extend(x[x.shape[0] - 1, :]) - borderobj.extend(x[:, x.shape[1] - 1]) - borderobj = np.unique(borderobj) - return borderobj - boundary_obj = boundary_labels(image) - dc_labels = np.unique(dc_image)[1:] - self.assertTrue(np.isin(dc_labels, boundary_obj)[0] ==False) + for f in self.flist: + if f.endswith(".ome.tif"): + br = BioReader(Path(self.inpDir, f)) + image = br.read().squeeze() + dc = Discard_borderobjects(self.inpDir, self.outDir, f) + dc_image = dc.discard_borderobjects() + self.assertFalse( + np.array_equal(np.unique(image), np.unique(dc_image)), + "unique labels should differ after discarding border objects", + ) + self.assertFalse(len(np.unique(image)) < len(np.unique(dc_image))) + + def boundary_labels(x: np.ndarray): + borderobj = list(x[0, :]) + borderobj.extend(x[:, 0]) + 
borderobj.extend(x[x.shape[0] - 1, :]) + borderobj.extend(x[:, x.shape[1] - 1]) + borderobj = np.unique(borderobj) + return borderobj + + boundary_obj = boundary_labels(image) + dc_labels = np.unique(dc_image)[1:] + self.assertTrue(np.isin(dc_labels, boundary_obj)[0] == False) def test_relabel_sequential(self): for f in self.flist: - if f.endswith('.ome.tif'): + if f.endswith(".ome.tif"): br = BioReader(Path(self.inpDir, f)) image = br.read().squeeze() dc = Discard_borderobjects(self.inpDir, self.outDir, f) dc_image = dc.discard_borderobjects() relabel_img, _ = dc.relabel_sequential() self.assertFalse(np.unique(np.diff(np.unique(relabel_img)))[0] != 1) - self.assertTrue(len(np.unique(image)) > len(np.unique(relabel_img))) + self.assertTrue( + len(np.unique(image)) >= len(np.unique(relabel_img)), + "after discarding border objects, unique labels should not increase", + ) def test_save_relabel_image(self): for f in self.flist: - if f.endswith('.ome.tif'): + if f.endswith(".ome.tif"): br = BioReader(Path(self.inpDir, f)) image = br.read().squeeze() dc = Discard_borderobjects(self.inpDir, self.outDir, f) dc_image = dc.discard_borderobjects() relabel_img, _ = dc.relabel_sequential() dc.save_relabel_image(relabel_img) - imagelist = [f for f in os.listdir(self.inpDir) if f.endswith('.ome.tif')] - relabel_list = [f for f in os.listdir(self.outDir) if f.endswith('.ome.tif')] + imagelist = [f for f in os.listdir(self.inpDir) if f.endswith(".ome.tif")] + relabel_list = [f for f in os.listdir(self.outDir) if f.endswith(".ome.tif")] self.assertTrue(len(imagelist) == len(relabel_list)) self.assertFalse(len(relabel_list) == 0) - -if __name__=="__main__": + + +if __name__ == "__main__": unittest.main() diff --git a/transforms/images/remove-border-objects-plugin/tests/version_test.py b/transforms/images/remove-border-objects-plugin/tests/version_test.py index c9d2c1c91..256dc247e 100644 --- a/transforms/images/remove-border-objects-plugin/tests/version_test.py +++ 
b/transforms/images/remove-border-objects-plugin/tests/version_test.py @@ -1,43 +1,50 @@ -import unittest, json +"""Version and container publication checks.""" +import json +import unittest from pathlib import Path -import urllib.request as request +from urllib import request +from urllib.error import HTTPError +from urllib.error import URLError + class VersionTest(unittest.TestCase): - """ Verify VERSION is correct """ - + """Verify VERSION is correct.""" + version_path = Path(__file__).parent.parent.joinpath("VERSION") json_path = Path(__file__).parent.parent.joinpath("plugin.json") - url = 'https://hub.docker.com/repository/docker/polusai/discard-border-objects-plugin/tags?page=1&ordering=last_updated' - - def test_plugin_manifest(self): - """ Tests VERSION matches the version in the plugin manifest """ - + url = "https://hub.docker.com/repository/docker/polusai/discard-border-objects-plugin/tags?page=1&ordering=last_updated" + + def test_plugin_manifest(self) -> None: + """Tests VERSION matches the version in the plugin manifest.""" # Get the plugin version - with open(self.version_path,'r') as file: - version = file.readline() - + with self.version_path.open(encoding="utf-8") as file: + version = file.readline().strip() + # Load the plugin manifest - with open(self.json_path,'r') as file: + with self.json_path.open(encoding="utf-8") as file: plugin_json = json.load(file) - - self.assertEqual(plugin_json['version'],version) - self.assertTrue(plugin_json['containerId'].endswith(version)) - def test_docker_hub(self): - """ Tests VERSION matches the latest docker container tag """ - + assert plugin_json["version"] == version + assert plugin_json["containerId"].endswith(version) + + def test_docker_hub(self) -> None: + """Tests VERSION matches the latest docker container tag.""" # Get the plugin version - with open(self.version_path,'r') as file: - version = file.readline() - - response = json.load(request.urlopen(self.url)) - if len(response['results']) == 0: - 
self.fail('Could not find repository or no containers are in the repository.') - latest_tag = json.load(response)['results'][0]['name'] - - self.assertEqual(latest_tag,version) - -if __name__=="__main__": - + with self.version_path.open(encoding="utf-8") as file: + version = file.readline().strip() + try: + with request.urlopen(self.url) as res: # noqa: S310 + response = json.load(res) + except (HTTPError, URLError): + self.skipTest("Docker Hub unreachable or returned error (e.g. 403)") + if len(response["results"]) == 0: + self.fail( + "Could not find repository or no containers are in the repository.", + ) + latest_tag = response["results"][0]["name"] + + assert latest_tag == version + + +if __name__ == "__main__": unittest.main() - \ No newline at end of file