diff --git a/pyproject.toml b/pyproject.toml index bdfecd61..90069bba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -271,15 +271,9 @@ datetime = [] # Decorators Module - Caching and utilities # Use: pip install scitex[decorators] +# Real implementation lives in the standalone scitex-decorators package. decorators = [ - "joblib", - "tqdm", - "xarray", - "pytest-asyncio", - "matplotlib", - "scipy", - # # Heavy dependencies handled by _AVAILABLE flags - # "torch", + "scitex-decorators[all]>=0.1.0", ] # Diagram Module - Diagram generation diff --git a/src/scitex/decorators/BLUEPRINT.md b/src/scitex/decorators/BLUEPRINT.md deleted file mode 100644 index 5ff5b5b7..00000000 --- a/src/scitex/decorators/BLUEPRINT.md +++ /dev/null @@ -1,101 +0,0 @@ - - -# Data Type Conversion Decorators Blueprint - -## Overview - -A comprehensive system for seamless data type conversions between NumPy, PyTorch, Pandas, and other array formats with type preservation and efficient processing. - -## Assumptions and Requirements - -* Python 3.7+ -* NumPy, PyTorch, Pandas, xarray -* Type hints support -* Functional programming paradigm - -## Concerns - -* [ ] Ensure memory efficiency with large arrays -* [ ] Handle edge cases for nested data structures -* [ ] Maintain type consistency across nested decorator chains -* [ ] Optimize CUDA memory management - -## Workflow as Text Diagram - -`Input Data → Type Detection → Conversion to Target Type → Function Execution → Result Conversion → Type Restoration → Output Data` - -## Workflow as Mermaid - -```mermaid -graph TD - A[Input Data] --> B[Type Detection] - B --> C[Convert to Target Type] - C --> D[Function Execution] - D --> E[Result Processing] - E --> F[Type Restoration] - F --> G[Output Data] - - H[Decorator Selection] --> |torch_fn| C - H --> |numpy_fn| C - H --> |pandas_fn| C - H --> |batch_fn| E - - I[Device Detection] --> C - I --> F -``` - -## Directory Structure - -``` -scitex/decorators/ -├── __init__.py -├── _wrap.py -├── 
_converters.py -├── _torch_fn.py -├── _numpy_fn.py -├── _pandas_fn.py -├── _batch_fn.py -├── _timeout.py -├── _deprecated.py -├── _preserve_doc.py -├── _not_implemented.py -├── _cache_mem.py -├── _cache_disk.py -└── _DataTypeDecorators.py -``` - -## Modules and Roles - -| Module | Role | Functions | -|------------------------|--------------------|---------------------------------------------------| -| _wrap.py | Base decorator | wrap() - Preserves function metadata | -| _converters.py | Core conversion | to_torch(), to_numpy(), is_torch(), is_cuda() | -| _torch_fn.py | PyTorch conversion | torch_fn() - Ensures PyTorch tensor processing | -| _numpy_fn.py | NumPy conversion | numpy_fn() - Ensures NumPy array processing | -| _pandas_fn.py | Pandas conversion | pandas_fn() - Ensures DataFrame processing | -| _batch_fn.py | Batched processing | batch_fn() - Handles large data in chunks | -| _DataTypeDecorators.py | Unified interface | Combined decorators and processor class | -| _timeout.py | Execution control | timeout() - Limits execution time | -| _deprecated.py | Code lifecycle | deprecated() - Marks deprecated functions | -| _not_implemented.py | Code status | not_implemented() - Marks unimplemented functions | -| _cache_mem.py | Memory caching | cache_mem() - Memory-based results caching | -| _cache_disk.py | Disk caching | cache_disk() - Disk-based results caching | - -## Pros and Cons - -| Pros | Cons | -|-----------------------------|-------------------------------| -| Type preservation | Decorator overhead | -| Function metadata retention | Complex debugging | -| Device awareness (CPU/CUDA) | Learning curve for users | -| Composable decorators | Multiple conversion paths | -| Transparent conversions | Memory usage with large data | -| Batch processing support | Type inference limitations | -| Seamless API experience | Potential performance impact | -| Reduced boilerplate code | Management of decorator order | - - diff --git a/src/scitex/decorators/README.md 
b/src/scitex/decorators/README.md deleted file mode 100755 index e453796d..00000000 --- a/src/scitex/decorators/README.md +++ /dev/null @@ -1,179 +0,0 @@ - - - -# Decorators - -**Note**: While these decorators can be complex when used together, they provide powerful functionality for handling various data types gracefully. The recent updates have improved their ability to handle edge cases and preserve important parameter types. - -## 🎯 Auto-Ordering Feature (NEW) - -To eliminate decorator ordering complexity, we now provide an **auto-ordering system** that automatically enforces the correct decorator order regardless of how you write them in your code! - -**Note**: Auto-ordering is opt-in to maintain backward compatibility. - -### Enable Auto-Ordering - -```python -from scitex.decorators import enable_auto_order - -# Enable auto-ordering at the start of your script -enable_auto_order() - -# Now decorators will be automatically ordered correctly! -@torch_fn # Will be reordered automatically -@batch_fn # Even if written in "wrong" order -def my_function(x): - return x.mean() -``` - -### How It Works - -The auto-ordering system ensures decorators are always applied in this optimal order: -1. **Type conversion decorators** (`@torch_fn`, `@numpy_fn`, `@pandas_fn`) - applied first -2. **Batch processing** (`@batch_fn`) - applied last - -This means you can write decorators in any order and they'll work correctly: - -```python -# All of these will work identically with auto-ordering enabled: -@batch_fn -@torch_fn -def func1(x): ... - -@torch_fn -@batch_fn -def func2(x): ... - -# Even multiple type converters are handled correctly -@batch_fn -@numpy_fn -@torch_fn -def func3(x): ... 
-``` - -### Disable Auto-Ordering - -If needed, you can disable auto-ordering and return to manual control: - -```python -from scitex.decorators import disable_auto_order -disable_auto_order() -``` - -## Manual Decorator Ordering (Legacy) - -If auto-ordering is disabled, decorators must be manually ordered correctly: - -1. **Type conversion decorators** (`@torch_fn`, `@numpy_fn`, `@pandas_fn`) - apply first (bottom) -2. **Batch processing** (`@batch_fn`) - apply last (top) - -Example: -```python -@batch_fn # Applied second (processes batches) -@torch_fn # Applied first (converts to tensor) -def my_function(x): - return x.mean() -``` - -## Recent Improvements 🚀 - -### Better Type Handling -- **Nested Lists/Tuples**: Decorators now properly handle nested structures without errors -- **Scalar Preservation**: Scalars (int, float, bool, str) are preserved and not converted -- **Dimension Tuples**: Parameters like `dim=(0, 1)` are kept as tuples, not converted to tensors -- **Parameter Conflicts**: Fixed axis/dim parameter conflicts in multi-decorator scenarios - -### Enhanced Batch Processing -- **Scalar Results**: `batch_fn` now correctly handles functions that return scalars -- **Smart Stacking**: Automatically chooses the right stacking method (stack vs vstack) -- **Parameter Compatibility**: Only passes `batch_size` to functions that accept it - -### Example of Improved Handling - -```python -# These now work correctly without errors: -@torch_fn -def process_nested(x): - # Works with nested lists like [[1, 2], [3, 4]] - return torch.tensor(x).mean() - -@batch_fn -@numpy_fn -def compute_stats(data, dim=(0, 1)): - # dim tuple is preserved, not converted - return np.mean(data, axis=dim) - -@torch_fn -def keep_scalars(x, scale=2.5): - # scale remains a float, not converted to tensor - return x * scale -``` - -## batch_fn -A decorator for processing data in batches. 
- -### Features -- Requires explicit `batch_size` keyword argument - - Automatically applies `batch_size=-1` if not specified -- Supports multiple batch dimensions: - - Single dimension: `batch_size=4` - - Multiple dimensions: `batch_size=(4, 8)` -- Guarantees consistent output regardless of batch size -- Supports NumPy arrays, PyTorch tensors, Pandas DataFrames -- **NEW**: Handles scalar results correctly -- **NEW**: Smart parameter passing (only passes batch_size when accepted) - -## torch_fn -A decorator for PyTorch function compatibility. - -### Features -- Handles nested torch_fn decorators -- Automatically converts `axis=X` to `dim=X` for torch functions -- Automatically applies `device="cuda"` if available -- Preserves input data types in output: - - NumPy arrays → NumPy arrays - - Pandas objects → Pandas objects - - Xarray objects → Xarray objects -- **NEW**: Handles nested lists/tuples gracefully -- **NEW**: Preserves scalar parameters (int, float, bool, str) -- **NEW**: Preserves dimension tuples like `dim=(0, 1)` -- **NEW**: Fixed axis/dim parameter conflicts - -### Example -```python -@torch_fn -def my_mean(x, dim=None): - # Works with nested lists, preserves dim tuples - return x.mean(dim=dim) if dim is not None else x.mean() -``` - -## numpy_fn -A decorator for NumPy function compatibility. - -### Features -- Automatically converts torch tensors to numpy arrays -- Preserves input data types in output -- Handles axis-related parameter conversions -- **NEW**: Better handling of mixed data types -- **NEW**: Preserves scalar parameters -- **NEW**: Works seamlessly with batch_fn - -## pandas_fn -A decorator for Pandas function compatibility. - -### Features -- Automatically converts input data to pandas objects -- Preserves index and column information -- Handles DataFrame and Series operations consistently - -## xarray_fn -A decorator for Xarray function compatibility. 
- -### Features -- Automatically converts input data to xarray objects -- Preserves coordinate and dimension information -- Supports labeled dimension operations diff --git a/src/scitex/decorators/__init__.py b/src/scitex/decorators/__init__.py index b7dea51b..a5187fbd 100755 --- a/src/scitex/decorators/__init__.py +++ b/src/scitex/decorators/__init__.py @@ -1,90 +1,23 @@ -#!/usr/bin/env python3 -"""Scitex decorators module.""" +"""SciTeX decorators — thin compatibility shim for scitex-decorators. -from ._auto_order import ( - AutoOrderDecorator, - batch_fn, - disable_auto_order, - enable_auto_order, - numpy_fn, - pandas_fn, - torch_fn, -) -from ._batch_fn import batch_fn -from ._cache_disk import cache_disk -from ._cache_disk_async import cache_disk_async -from ._cache_mem import cache_mem -from ._combined import ( - batch_numpy_fn, - batch_pandas_fn, - batch_torch_fn, - numpy_batch_fn, - pandas_batch_fn, - torch_batch_fn, -) -from ._converters import ( - ConversionWarning, - is_cuda, - is_nested_decorator, - is_torch, - to_numpy, - to_torch, -) -from ._deprecated import deprecated -from ._not_implemented import not_implemented -from ._numpy_fn import numpy_fn -from ._pandas_fn import pandas_fn -from ._preserve_doc import preserve_doc -from ._signal_fn import signal_fn -from ._timeout import timeout -from ._torch_fn import torch_fn -from ._wrap import wrap -from ._xarray_fn import xarray_fn +Aliases ``scitex.decorators`` to the standalone ``scitex_decorators`` package +via ``sys.modules``. ``scitex.decorators is scitex_decorators``. +The full type-conversion / caching / batching / deprecation decorator surface +is preserved. -# Lazy import session decorator to avoid circular imports -def __getattr__(name): - if name == "session": - # Import the parent scitex module to get the wrapper - import scitex +Install: ``pip install scitex[decorators]`` (or ``pip install scitex-decorators``). 
+See: https://github.com/ywatanabe1989/scitex-decorators +""" - return scitex.session - raise AttributeError(f"module '{__name__}' has no attribute '{name}'") +import sys as _sys +try: + import scitex_decorators as _real +except ImportError as _e: # pragma: no cover + raise ImportError( + "scitex.decorators requires the 'scitex-decorators' package. " + "Install with: pip install scitex[decorators] (or: pip install scitex-decorators)" + ) from _e -__all__ = [ - "AutoOrderDecorator", - "ConversionWarning", - "batch_fn", - "batch_fn", - "batch_numpy_fn", - "batch_pandas_fn", - "batch_torch_fn", - "cache_disk", - "cache_disk_async", - "cache_mem", - "deprecated", - "disable_auto_order", - "enable_auto_order", - "is_cuda", - "is_nested_decorator", - "is_torch", - "not_implemented", - "numpy_batch_fn", - "numpy_fn", - "numpy_fn", - "pandas_batch_fn", - "pandas_fn", - "pandas_fn", - "preserve_doc", - "session", - "signal_fn", - "timeout", - "to_numpy", - "to_torch", - "torch_batch_fn", - "torch_fn", - "torch_fn", - "wrap", - "xarray_fn", -] +_sys.modules[__name__] = _real diff --git a/src/scitex/decorators/_auto_order.py b/src/scitex/decorators/_auto_order.py deleted file mode 100755 index 18e50cac..00000000 --- a/src/scitex/decorators/_auto_order.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2025-06-01 10:30:00 (ywatanabe)" -# File: ./scitex_repo/src/scitex/decorators/_auto_order.py - -""" -Auto-ordering decorator system that enforces predefined order regardless of -how decorators are written in code. - -The enforced order is: -1. Type conversion (innermost): torch_fn, numpy_fn, pandas_fn -2. Batch processing (outermost): batch_fn - -This uses a delayed application approach where decorators are collected -and then applied in the correct order when the function is first called. 
- -Example -------- ->>> from scitex.decorators import enable_auto_order ->>> enable_auto_order() ->>> ->>> # These will all work identically: ->>> @batch_fn ->>> @torch_fn ->>> def func1(x): -... return x.mean() ->>> ->>> @torch_fn ->>> @batch_fn # Order doesn't matter! ->>> def func2(x): -... return x.mean() - -The auto-ordering system eliminates decorator ordering complexity and -prevents common errors from incorrect decorator stacking. -""" - -from functools import wraps -from typing import Any, Callable, List, Tuple - -from ._batch_fn import batch_fn as _orig_batch_fn -from ._numpy_fn import numpy_fn as _orig_numpy_fn -from ._pandas_fn import pandas_fn as _orig_pandas_fn - -# Import original decorators -from ._torch_fn import torch_fn as _orig_torch_fn - -# Decorator priority (higher = inner/applied first) -DECORATOR_PRIORITY = { - "torch_fn": 100, - "numpy_fn": 100, - "pandas_fn": 100, - "batch_fn": 10, -} - -# Original decorator mapping -ORIGINAL_DECORATORS = { - "torch_fn": _orig_torch_fn, - "numpy_fn": _orig_numpy_fn, - "pandas_fn": _orig_pandas_fn, - "batch_fn": _orig_batch_fn, -} - - -class AutoOrderDecorator: - """Decorator that collects and applies decorators in predefined order.""" - - def __init__(self, name: str): - self.name = name - self.priority = DECORATOR_PRIORITY[name] - self.original = ORIGINAL_DECORATORS[name] - - def __call__(self, func: Callable) -> Callable: - # Initialize or get pending decorators list - if not hasattr(func, "_pending_decorators"): - # First decorator - create the wrapper - original_func = func - - @wraps(func) - def auto_ordered_wrapper(*args, **kwargs): - # On first call, apply decorators in correct order - if hasattr(auto_ordered_wrapper, "_pending_decorators"): - # Sort by priority (descending = innermost first) - decorators = sorted( - auto_ordered_wrapper._pending_decorators, - key=lambda x: x[1], - reverse=True, - ) - - # Apply decorators in order - final_func = original_func - for dec_name, _, dec_func in 
decorators: - final_func = dec_func(final_func) - - # Replace this wrapper with the final decorated function - auto_ordered_wrapper._final_func = final_func - delattr(auto_ordered_wrapper, "_pending_decorators") - - # Call the final decorated function - if hasattr(auto_ordered_wrapper, "_final_func"): - return auto_ordered_wrapper._final_func(*args, **kwargs) - else: - return original_func(*args, **kwargs) - - auto_ordered_wrapper._pending_decorators = [] - func = auto_ordered_wrapper - - # Add this decorator to pending list - func._pending_decorators.append((self.name, self.priority, self.original)) - - return func - - -# Create auto-ordering versions -torch_fn = AutoOrderDecorator("torch_fn") -numpy_fn = AutoOrderDecorator("numpy_fn") -pandas_fn = AutoOrderDecorator("pandas_fn") -batch_fn = AutoOrderDecorator("batch_fn") - - -# Enable auto-ordering globally -def enable_auto_order(): - """ - Enable auto-ordering for all decorators in the scitex.decorators module. - - This replaces the standard decorators with auto-ordering versions. - - Example - ------- - >>> import scitex - >>> scitex.decorators.enable_auto_order() - >>> - >>> # Now decorators will auto-order regardless of how they're written - >>> @scitex.decorators.batch_fn - >>> @scitex.decorators.torch_fn - >>> def my_func(x): - ... return x.mean() - """ - import scitex.decorators as decorators_module - - # Replace with auto-ordering versions - decorators_module.torch_fn = torch_fn - decorators_module.numpy_fn = numpy_fn - decorators_module.pandas_fn = pandas_fn - decorators_module.batch_fn = batch_fn - - print("Auto-ordering enabled for scitex decorators!") - print("Decorators will now apply in predefined order:") - print(" 1. Type conversion (torch_fn, numpy_fn, pandas_fn)") - print(" 2. 
Batch processing (batch_fn)") - - -def disable_auto_order(): - """Disable auto-ordering and restore original decorators.""" - import scitex.decorators as decorators_module - - # Restore original decorators - decorators_module.torch_fn = _orig_torch_fn - decorators_module.numpy_fn = _orig_numpy_fn - decorators_module.pandas_fn = _orig_pandas_fn - decorators_module.batch_fn = _orig_batch_fn - - print("Auto-ordering disabled. Using original decorators.") - - -__all__ = [ - "torch_fn", - "numpy_fn", - "pandas_fn", - "batch_fn", - "enable_auto_order", - "disable_auto_order", -] diff --git a/src/scitex/decorators/_batch_fn.py b/src/scitex/decorators/_batch_fn.py deleted file mode 100755 index 13f0632a..00000000 --- a/src/scitex/decorators/_batch_fn.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-05-01 09:18:26 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_batch_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_batch_fn.py" -__DIR__ = os.path.dirname(__FILE__) -from functools import wraps - -# ---------------------------------------- -from typing import Any as _Any -from typing import Callable - -import numpy as np -from tqdm import tqdm as _tqdm - -from ._converters import is_nested_decorator - - -def batch_fn(func: Callable) -> Callable: - @wraps(func) - def wrapper(x: _Any, *args: _Any, **kwargs: _Any) -> _Any: - # Skip batching if in a nested decorator context and batch_size is already set - if is_nested_decorator() and "batch_size" in kwargs: - return func(x, *args, **kwargs) - - # Set the current decorator context - wrapper._current_decorator = "batch_fn" - - # Mark that batch_fn has been applied - if not hasattr(wrapper, "_decorator_order"): - wrapper._decorator_order = [] - wrapper._decorator_order.append("batch_fn") - - batch_size = int(kwargs.pop("batch_size", 4)) - if len(x) <= batch_size: - # Only pass batch_size if the 
function accepts it - import inspect - - try: - sig = inspect.signature(func) - if "batch_size" in sig.parameters: - return func(x, *args, **kwargs, batch_size=batch_size) - else: - return func(x, *args, **kwargs) - except: - # Fallback for wrapped functions - return func(x, *args, **kwargs) - - n_batches = (len(x) + batch_size - 1) // batch_size - results = [] - - for i_batch in _tqdm(range(n_batches)): - start = i_batch * batch_size - end = min((i_batch + 1) * batch_size, len(x)) - - # Only pass batch_size if the function accepts it - import inspect - - try: - sig = inspect.signature(func) - if "batch_size" in sig.parameters: - batch_result = func( - x[start:end], *args, **kwargs, batch_size=batch_size - ) - else: - batch_result = func(x[start:end], *args, **kwargs) - except: - # Fallback for wrapped functions - batch_result = func(x[start:end], *args, **kwargs) - - import torch - - if isinstance(batch_result, torch.Tensor): - batch_result = batch_result.cpu() - elif isinstance(batch_result, tuple): - batch_result = tuple( - val.cpu() if isinstance(val, torch.Tensor) else val - for val in batch_result - ) - - results.append(batch_result) - - import torch - - if isinstance(results[0], tuple): - n_vars = len(results[0]) - combined_results = [] - for i_var in range(n_vars): - # Check if this element is stackable (tensor/array) or should be kept as-is - first_elem = results[0][i_var] - if isinstance(first_elem, (torch.Tensor, np.ndarray)): - # Stack tensors/arrays - if isinstance(first_elem, torch.Tensor): - if first_elem.ndim == 0: - combined = torch.stack([res[i_var] for res in results]) - else: - combined = torch.vstack([res[i_var] for res in results]) - else: - combined = np.vstack([res[i_var] for res in results]) - combined_results.append(combined) - else: - # For non-tensor elements (like lists), just take the first one - # (assuming they're all the same across batches) - combined_results.append(first_elem) - return tuple(combined_results) - elif 
isinstance(results[0], torch.Tensor): - # Check if results are 0-D tensors (scalars) - if results[0].ndim == 0: - return torch.stack(results) - else: - return torch.vstack(results) - elif isinstance(results[0], np.ndarray): - # Handle numpy arrays - if results[0].ndim == 0: - return np.array(results) - else: - return np.vstack(results) - elif isinstance(results[0], (int, float)): - # Handle scalar results - return np.array(results) if len(results) > 1 else results[0] - else: - # For lists and other types - return sum(results, []) - - # Mark as a wrapper for detection - wrapper._is_wrapper = True - wrapper._decorator_type = "batch_fn" - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_cache_disk.py b/src/scitex/decorators/_cache_disk.py deleted file mode 100755 index 65cedd80..00000000 --- a/src/scitex/decorators/_cache_disk.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-12-09 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk.py -# ---------------------------------------- -from __future__ import annotations - -import os - -__FILE__ = "./src/scitex/decorators/_cache_disk.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import functools - -from scitex.config import get_paths - - -def cache_disk(func): - """Disk caching decorator that uses joblib.Memory. - - joblib is lazy-imported to keep ``import scitex.decorators`` (and the - transitive ``import scitex.io`` chain) usable on venvs without joblib - installed. Without lazy-import, the eager ``from joblib import Memory`` - raised ``ModuleNotFoundError: No module named 'joblib'`` at import - time and broke any caller of scitex.io that didn't need caching at - all (todo#442, same class as #441 / #279). 
- - Usage: - @cache_disk - def expensive_function(x): - return x ** 2 - """ - cache_dir = str(get_paths().function_cache) - from joblib import Memory as _Memory # lazy: see todo#442 - - memory = _Memory(cache_dir, verbose=0) - - @functools.wraps(func) - def wrapper(*args, **kwargs): - cached_func = memory.cache(func) - return cached_func(*args, **kwargs) - - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_cache_disk_async.py b/src/scitex/decorators/_cache_disk_async.py deleted file mode 100755 index 35d33fa1..00000000 --- a/src/scitex/decorators/_cache_disk_async.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-12-09 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk_async.py -# ---------------------------------------- -from __future__ import annotations - -import os - -__FILE__ = "./src/scitex/decorators/_cache_disk_async.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- -"""Async disk caching decorator using joblib.Memory.""" - -import asyncio -import functools - -from scitex.config import get_paths - - -def cache_disk_async(func): - """Disk caching decorator for async functions. - - joblib is lazy-imported inside the decorator body so that ``import - scitex.decorators`` does not fail on venvs without joblib (todo#442, - same class as #441 / #279). Without this, the eager top-level - ``from joblib import Memory`` propagates ``ModuleNotFoundError`` up - the ``scitex.io`` import chain and breaks unrelated callers. 
- - Usage: - @cache_disk_async - async def expensive_async_function(x): - await asyncio.sleep(1) - return x ** 2 - """ - cache_dir = str(get_paths().function_cache) - from joblib import Memory as _Memory # lazy: see todo#442 - - memory = _Memory(cache_dir, verbose=0) - - # Create sync wrapper for joblib - def sync_wrapper(*args, **kwargs): - return asyncio.run(func(*args, **kwargs)) - - cached_sync = memory.cache(sync_wrapper) - - @functools.wraps(func) - async def async_wrapper(*args, **kwargs): - # Run cached sync version in executor to avoid blocking - loop = asyncio.get_event_loop() - result = await loop.run_in_executor(None, lambda: cached_sync(*args, **kwargs)) - return result - - return async_wrapper - - -# EOF diff --git a/src/scitex/decorators/_cache_mem.py b/src/scitex/decorators/_cache_mem.py deleted file mode 100755 index a35e0dda..00000000 --- a/src/scitex/decorators/_cache_mem.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2024-11-07 05:52:33 (ywatanabe)" -# File: ./scitex_repo/src/scitex/decorators/_cache_mem.py - -from functools import lru_cache as _lru_cache - -# Memory cache -cache_mem = _lru_cache(maxsize=None) - - -# EOF diff --git a/src/scitex/decorators/_combined.py b/src/scitex/decorators/_combined.py deleted file mode 100755 index 8469f5fe..00000000 --- a/src/scitex/decorators/_combined.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2025-06-01 10:20:00 (ywatanabe)" -# File: ./scitex_repo/src/scitex/decorators/_combined.py - -""" -Combined decorators with predefined application order to reduce complexity. - -The order is always: type conversion → batch processing -This ensures consistent behavior and reduces unexpected interactions. 
-""" - -from functools import wraps -from typing import Callable - -from ._batch_fn import batch_fn -from ._numpy_fn import numpy_fn -from ._pandas_fn import pandas_fn -from ._torch_fn import torch_fn - - -def torch_batch_fn(func: Callable) -> Callable: - """ - Combined decorator: torch_fn → batch_fn. - - Converts inputs to torch tensors, then processes in batches. - This is the recommended order for PyTorch operations. - - Example - ------- - >>> @torch_batch_fn - ... def process_data(x, dim=None): - ... return x.mean(dim=dim) - """ - - @wraps(func) - @torch_fn - @batch_fn - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return wrapper - - -def numpy_batch_fn(func: Callable) -> Callable: - """ - Combined decorator: numpy_fn → batch_fn. - - Converts inputs to numpy arrays, then processes in batches. - This is the recommended order for NumPy operations. - - Example - ------- - >>> @numpy_batch_fn - ... def process_data(x, axis=None): - ... return np.mean(x, axis=axis) - """ - - @wraps(func) - @numpy_fn - @batch_fn - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return wrapper - - -def pandas_batch_fn(func: Callable) -> Callable: - """ - Combined decorator: pandas_fn → batch_fn. - - Converts inputs to pandas DataFrames, then processes in batches. - This is the recommended order for Pandas operations. - - Example - ------- - >>> @pandas_batch_fn - ... def process_data(df): - ... 
return df.describe() - """ - - @wraps(func) - @pandas_fn - @batch_fn - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - return wrapper - - -# Aliases for common use cases -batch_torch_fn = torch_batch_fn # Alternative name -batch_numpy_fn = numpy_batch_fn # Alternative name -batch_pandas_fn = pandas_batch_fn # Alternative name - - -__all__ = [ - "torch_batch_fn", - "numpy_batch_fn", - "pandas_batch_fn", - "batch_torch_fn", - "batch_numpy_fn", - "batch_pandas_fn", -] diff --git a/src/scitex/decorators/_converters.py b/src/scitex/decorators/_converters.py deleted file mode 100755 index 21e2565d..00000000 --- a/src/scitex/decorators/_converters.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-30 14:58:43 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_converters.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_converters.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import functools -import warnings -from typing import Any as _Any -from typing import Callable, Dict, Tuple, Union - -import numpy as np - -""" -Core conversion utilities for handling data type transformations. -Provides consistent conversion between NumPy, PyTorch, Pandas, and other formats. -""" - - -class ConversionWarning(UserWarning): - pass - - -# Configure warnings -warnings.simplefilter("always", ConversionWarning) - - -@functools.lru_cache(maxsize=None) -def _cached_warning(message: str) -> None: - """Cache warnings to avoid repetition.""" - warnings.warn(message, category=ConversionWarning) - - -def _conversion_warning(old: _Any, new) -> None: - """Generate standardized type conversion warning.""" - message = ( - f"Converted from {type(old).__name__} to {type(new).__name__} ({new.device}). " - f"Consider using {type(new).__name__} ({new.device}) as input for faster computation." 
- ) - _cached_warning(message) - - -def _try_device(tensor, device: str): - """Try to move tensor to specified device with graceful fallback.""" - import torch - - if not isinstance(tensor, torch.Tensor): - return tensor - - if tensor.device.type == device: - return tensor - - try: - return tensor.to(device) - except RuntimeError as error: - if "cuda" in str(error).lower() and device == "cuda": - warnings.warn("CUDA memory insufficient, falling back to CPU.", UserWarning) - return tensor.cpu() - raise error - - -def is_torch(*args: _Any, **kwargs: _Any) -> bool: - """Check if any input is a PyTorch tensor.""" - import torch - - return any(isinstance(arg, torch.Tensor) for arg in args) or any( - isinstance(val, torch.Tensor) for val in kwargs.values() - ) - - -def is_cuda(*args: _Any, **kwargs: _Any) -> bool: - """Check if any input is a CUDA tensor.""" - import torch - - return any((isinstance(arg, torch.Tensor) and arg.is_cuda) for arg in args) or any( - (isinstance(val, torch.Tensor) and val.is_cuda) for val in kwargs.values() - ) - - -def _return_always(*args: _Any, **kwargs: _Any) -> Tuple[Tuple, Dict]: - """Always return args and kwargs as a tuple of (args, kwargs).""" - return args, kwargs - - -def _return_if(*args: _Any, **kwargs: _Any) -> Union[Tuple, Dict, None]: - """Return args and/or kwargs depending on what's provided.""" - if args and kwargs: - return args, kwargs - elif args: - return args - elif kwargs: - return kwargs - else: - return None - - -def to_torch( - *args: _Any, - return_fn: Callable = _return_if, - device: str = None, - **kwargs: _Any, -) -> _Any: - """Convert various data types to PyTorch tensors.""" - import torch - - if device is None: - device = kwargs.get("device", "cuda" if torch.cuda.is_available() else "cpu") - - def _to_torch(data: _Any) -> _Any: - """Internal conversion function for various data types.""" - import pandas as pd - import torch - - # Check for None - if data is None: - return None - - # Don't convert scalars 
(int, float, bool, str) - they should remain as is - if isinstance(data, (int, float, bool, str)): - return data - - # Handle collections - if isinstance(data, (tuple, list)): - # Check if it's a tuple/list of integers (like dimensions) - if all(isinstance(item, int) for item in data): - return data # Keep as is for dimension tuples - - # Check if it's a numeric array-like structure - try: - # Try to convert to tensor directly - new_data = torch.tensor(data).float() - new_data = _try_device(new_data, device) - if device == "cuda": - _conversion_warning(data, new_data) - return new_data - except: - # If conversion fails, process items individually and return as tensor if possible - converted_items = [_to_torch(item) for item in data if item is not None] - # Try to stack if all items are tensors - if converted_items and all( - isinstance(item, torch.Tensor) for item in converted_items - ): - try: - # Stack tensors along a new dimension - return torch.stack(converted_items) - except: - # Return as list if stacking fails - return converted_items - return converted_items - - # Handle pandas types - if isinstance(data, (pd.Series, pd.DataFrame)): - new_data = torch.tensor(data.to_numpy()).squeeze().float() - new_data = _try_device(new_data, device) - if device == "cuda": - _conversion_warning(data, new_data) - return new_data - - # Handle arrays - if isinstance(data, np.ndarray): - new_data = torch.tensor(data).float() - new_data = _try_device(new_data, device) - if device == "cuda": - _conversion_warning(data, new_data) - return new_data - - # Handle xarray - import xarray - - if ( - hasattr(data, "__class__") - and data.__class__.__module__ == "xarray.core.dataarray" - and data.__class__.__name__ == "DataArray" - ): - new_data = torch.tensor(np.array(data)).float() - new_data = _try_device(new_data, device) - if device == "cuda": - _conversion_warning(data, new_data) - return new_data - - # Return as is for other types - return data - - # Process args and kwargs - 
converted_args = [_to_torch(arg) for arg in args if arg is not None] - converted_kwargs = { - key: _to_torch(val) for key, val in kwargs.items() if val is not None - } - - # Handle axis/dim parameter conversion - # Only convert axis to dim if dim is not already present - if "axis" in converted_kwargs and "dim" not in converted_kwargs: - converted_kwargs["dim"] = converted_kwargs.pop("axis") - - # Return in the specified format - return return_fn(*converted_args, **converted_kwargs) - - -def to_numpy(*args: _Any, return_fn: Callable = _return_if, **kwargs: _Any) -> _Any: - """Convert various data types to NumPy arrays.""" - - def _to_numpy(data: _Any) -> _Any: - """Internal conversion function for various data types.""" - import pandas as pd - import torch - - # Check for None - if data is None: - return None - - # Don't convert scalars (int, float, bool, str) - they should remain as is - if isinstance(data, (int, float, bool, str)): - return data - - # Handle pandas types - if isinstance(data, (pd.Series, pd.DataFrame)): - return data.to_numpy().squeeze() - - # Handle torch tensors - if isinstance(data, torch.Tensor): - return data.detach().cpu().numpy() - - # Handle lists and tuples - if isinstance(data, (list, tuple)): - # Check if it's a tuple/list of integers (like dimensions) - if all(isinstance(item, int) for item in data): - return data # Keep as is for dimension tuples - - # Check if it's a numeric array-like structure - try: - # Try to convert to numpy array directly - return np.array(data) - except: - # If conversion fails, process items individually - converted_items = [_to_numpy(item) for item in data if item is not None] - # Try to stack if all items are numpy arrays - if converted_items and all( - isinstance(item, np.ndarray) for item in converted_items - ): - try: - # Stack arrays along a new dimension - return np.stack(converted_items) - except: - # Return as list if stacking fails - return converted_items - return converted_items - - # Return as is 
for other types - return data - - # Process args and kwargs - converted_args = [_to_numpy(arg) for arg in args if arg is not None] - converted_kwargs = { - key: _to_numpy(val) for key, val in kwargs.items() if val is not None - } - - # Handle dim/axis parameter conversion - # Only convert dim to axis if axis is not already present - if "dim" in converted_kwargs and "axis" not in converted_kwargs: - converted_kwargs["axis"] = converted_kwargs.pop("dim") - - # Return in the specified format - return return_fn(*converted_args, **converted_kwargs) - - -def is_nested_decorator(): - """Check if we're in a nested decorator context.""" - import inspect - - frame = inspect.currentframe() - current_decorator = None - decorator_chain = [] - - # Walk up the call stack - while frame: - if frame.f_code.co_name == "wrapper": - # Check if this frame has local variables - if frame.f_locals: - # Try to get the self reference if it's a method - if "self" in frame.f_locals: - decorator_chain.append(frame.f_locals["self"]) - - # Check if the wrapper has marked itself with decorator info - if "_current_decorator" in frame.f_locals: - decorator_type = frame.f_locals["_current_decorator"] - if current_decorator is None: - current_decorator = decorator_type - elif current_decorator != decorator_type: - # Found a different decorator in the chain - return True - - frame = frame.f_back - - # If we found more than one decorator in the chain - return len(decorator_chain) > 1 - - -# EOF diff --git a/src/scitex/decorators/_deprecated.py b/src/scitex/decorators/_deprecated.py deleted file mode 100755 index 47750676..00000000 --- a/src/scitex/decorators/_deprecated.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-08-21 20:57:29 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_deprecated.py -# ---------------------------------------- -from __future__ import annotations - -import os - -__FILE__ = __file__ -__DIR__ = 
os.path.dirname(__FILE__) -# ---------------------------------------- -import functools -import importlib -import warnings - - -def deprecated(reason=None, forward_to=None): - """ - A decorator to mark functions as deprecated. It will result in a warning being emitted - when the function is used. - - Args: - reason (str): A human-readable string explaining why this function was deprecated. - forward_to (str): Optional module path to forward calls to (e.g., "..session.start"). - If provided, calls will be forwarded to the new function instead of - executing the original deprecated function. - """ - - def decorator(func): - if forward_to: - # Create a forwarding wrapper with auto-generated docstring - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.warn( - f"{func.__name__} is deprecated: {reason}", - DeprecationWarning, - stacklevel=2, - ) - # Dynamic import and call forwarding - module_path, function_name = forward_to.rsplit(".", 1) - - # Handle relative imports - if module_path.startswith(".."): - # Get the module where the function was defined (not the calling module) - func_module = func.__module__ - - if func_module: - # Convert relative import to absolute based on the function's module - package_parts = func_module.split(".") - # Count the number of dots to determine how many levels to go up - level_count = 0 - for char in module_path: - if char == ".": - level_count += 1 - else: - break - - # Remove the relative part and create absolute path - if level_count > 0: - base_package_parts = package_parts[:-level_count] - if base_package_parts: - base_package = ".".join(base_package_parts) - relative_part = module_path.lstrip(".") - module_path = ( - base_package + "." 
+ relative_part - if relative_part - else base_package - ) - else: - # Can't go up that many levels, fallback to absolute - module_path = module_path.lstrip(".") - - try: - target_module = importlib.import_module(module_path) - target_function = getattr(target_module, function_name) - return target_function(*args, **kwargs) - except (ImportError, AttributeError) as e: - # Fallback to original function if forwarding fails - warnings.warn( - f"Failed to forward {func.__name__} to {forward_to}: {e}. " - f"Using original deprecated implementation.", - RuntimeWarning, - stacklevel=2, - ) - return func(*args, **kwargs) - - # Auto-generate docstring for forwarding wrapper with target function's docstring - original_name = func.__name__ - new_location = forward_to.replace("..", "scitex.").lstrip(".") - - # Try to get the target function's docstring - target_docstring = "" - try: - # Get the same target we'll forward to - target_module_path, target_function_name = forward_to.rsplit(".", 1) - - # Handle relative imports for docstring retrieval - if target_module_path.startswith(".."): - func_module = func.__module__ - if func_module: - package_parts = func_module.split(".") - level_count = 0 - for char in target_module_path: - if char == ".": - level_count += 1 - else: - break - - if level_count > 0: - base_package_parts = package_parts[:-level_count] - if base_package_parts: - base_package = ".".join(base_package_parts) - relative_part = target_module_path.lstrip(".") - target_module_path = ( - base_package + "." 
+ relative_part - if relative_part - else base_package - ) - else: - target_module_path = target_module_path.lstrip(".") - - target_module = importlib.import_module(target_module_path) - target_function = getattr(target_module, target_function_name) - if target_function.__doc__: - target_docstring = target_function.__doc__.strip() - except (ImportError, AttributeError): - pass # Fall back to basic docstring if target can't be imported - - # Create comprehensive docstring combining deprecation notice with target docs - if target_docstring: - forwarding_docstring = f"""**DEPRECATED: Use {new_location} instead** - -{target_docstring} - -Deprecation Notice ------------------- -This function is deprecated and will be removed in a future version. -Use `{new_location}` instead. This wrapper forwards all calls to the new function -while displaying a deprecation warning. - -Parameters ----------- -*args : tuple - Positional arguments passed to {new_location} -**kwargs : dict - Keyword arguments passed to {new_location} - -Returns -------- -Any - Same return value as {new_location} - -Warns ------ -DeprecationWarning - Always warns that this function is deprecated -""" - else: - # Fallback if target docstring unavailable - forwarding_docstring = f"""**DEPRECATED: Use {new_location} instead** - -This function provides backward compatibility for existing code that uses -{original_name}(). It forwards all calls to the new {new_location} -function while displaying a deprecation warning. 
- -Parameters ----------- -*args : tuple - Positional arguments passed to {new_location} -**kwargs : dict - Keyword arguments passed to {new_location} - -Returns -------- -Any - Same return value as {new_location} - -Warns ------ -DeprecationWarning - Always warns that this function is deprecated -""" - new_func.__doc__ = forwarding_docstring - return new_func - else: - # Original behavior for non-forwarding deprecation - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.warn( - f"{func.__name__} is deprecated: {reason}", - DeprecationWarning, - stacklevel=2, - ) - return func(*args, **kwargs) - - return new_func - - return decorator - - -# EOF diff --git a/src/scitex/decorators/_not_implemented.py b/src/scitex/decorators/_not_implemented.py deleted file mode 100755 index 711d6183..00000000 --- a/src/scitex/decorators/_not_implemented.py +++ /dev/null @@ -1,30 +0,0 @@ -#!./env/bin/python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2024-06-07 22:16:25 (ywatanabe)" -# /home/ywatanabe/proj/scitex/src/scitex/gen/_not_implemented.py - -import warnings - - -def not_implemented(func): - """ - Decorator to mark methods as not implemented, issue a warning, and prevent their execution. - - Arguments: - func (callable): The function or method to decorate. - - Returns: - callable: A wrapper function that issues a warning and raises NotImplementedError when called. - """ - - def wrapper(*args, **kwargs): - # Issue a warning before raising the error - warnings.warn( - f"Attempt to use unimplemented method: '{func.__name__}'. 
This method is not yet available.", - category=FutureWarning, - stacklevel=2, - ) - # # Raise the NotImplementedError - # raise NotImplementedError(f"The method '{func.__name__}' is not implemented yet.") - - return wrapper diff --git a/src/scitex/decorators/_numpy_fn.py b/src/scitex/decorators/_numpy_fn.py deleted file mode 100755 index 6f4eb2e3..00000000 --- a/src/scitex/decorators/_numpy_fn.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-30 15:29:53 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_numpy_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_numpy_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import numpy as np - -THIS_FILE = "/home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_numpy_fn.py" - -from functools import wraps -from typing import Any as _Any -from typing import Callable - -from ._converters import _return_always, is_nested_decorator, to_numpy - - -def numpy_fn(func: Callable) -> Callable: - @wraps(func) - def wrapper(*args: _Any, **kwargs: _Any) -> _Any: - # Skip conversion if already in a nested decorator context - if is_nested_decorator(): - results = func(*args, **kwargs) - return results - - # Set the current decorator context - wrapper._current_decorator = "numpy_fn" - - # Store original object for type preservation - original_object = args[0] if args else None - - converted_args, converted_kwargs = to_numpy( - *args, return_fn=_return_always, **kwargs - ) - - # Skip strict assertion for certain types that may not convert to arrays - # Instead, convert what we can and pass through what we can't - validated_args = [] - for arg_index, arg in enumerate(converted_args): - if isinstance(arg, np.ndarray): - validated_args.append(arg) - elif isinstance(arg, (int, float, str, type(None))): - # Pass through scalars and strings unchanged - 
validated_args.append(arg) - elif isinstance(arg, list) and all( - isinstance(item, np.ndarray) for item in arg - ): - # List of arrays - pass through as is - validated_args.append(arg) - else: - # Try one more conversion attempt - try: - validated_args.append(np.array(arg)) - except: - # If all else fails, pass through unchanged - validated_args.append(arg) - - results = func(*validated_args, **converted_kwargs) - - # Convert results back to original input types - if isinstance(results, np.ndarray): - if original_object is not None: - if isinstance(original_object, list): - return results.tolist() - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Tensor" - ): - import torch - - return torch.tensor(results) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "DataFrame" - ): - import pandas as pd - - return pd.DataFrame(results) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Series" - ): - import pandas as pd - - return pd.Series(results) - return results - - return results - - # Mark as a wrapper for detection - wrapper._is_wrapper = True - wrapper._decorator_type = "numpy_fn" - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_pandas_fn.py b/src/scitex/decorators/_pandas_fn.py deleted file mode 100755 index a9ab349d..00000000 --- a/src/scitex/decorators/_pandas_fn.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-30 15:44:00 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_pandas_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_pandas_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -THIS_FILE = "/home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_pandas_fn.py" - -from functools import wraps -from typing import Any as _Any -from typing import 
Callable - -import numpy as np - -from ._converters import is_nested_decorator - - -def pandas_fn(func: Callable) -> Callable: - @wraps(func) - def wrapper(*args: _Any, **kwargs: _Any) -> _Any: - # Skip conversion if already in a nested decorator context - if is_nested_decorator(): - results = func(*args, **kwargs) - return results - - # Set the current decorator context - wrapper._current_decorator = "pandas_fn" - - # Store original object for type preservation - original_object = args[0] if args else None - - # Convert args to pandas DataFrames - def to_pandas(data): - import pandas as pd - import torch - import xarray as xr - - if data is None: - return None - elif isinstance(data, pd.DataFrame): - return data - elif isinstance(data, pd.Series): - return pd.DataFrame(data) - elif isinstance(data, np.ndarray): - return pd.DataFrame(data) - elif isinstance(data, list): - try: - return pd.DataFrame(data) - except: - # If list can't be converted to DataFrame, return as is - return data - elif hasattr(data, "__class__") and data.__class__.__name__ == "Tensor": - return pd.DataFrame(data.detach().cpu().numpy()) - elif hasattr(data, "__class__") and data.__class__.__name__ == "DataArray": - return pd.DataFrame(data.values) - elif isinstance(data, (int, float, str)): - # Don't convert scalars to DataFrames - return data - else: - try: - return pd.DataFrame([data]) - except: - # If conversion fails, return as is - return data - - converted_args = [to_pandas(arg) for arg in args] - converted_kwargs = {k: to_pandas(v) for k, v in kwargs.items()} - - # Skip strict assertion for certain types - import pandas as pd - - validated_args = [] - for arg_index, arg in enumerate(converted_args): - if isinstance(arg, pd.DataFrame): - validated_args.append(arg) - elif isinstance(arg, (int, float, str, type(None), pd.Series)): - # Pass through scalars, strings, Series, and None unchanged - validated_args.append(arg) - elif isinstance(arg, list) and all( - isinstance(item, pd.DataFrame) 
for item in arg - ): - # List of DataFrames - pass through as is - validated_args.append(arg) - else: - # Try one more conversion attempt - try: - validated_args.append(pd.DataFrame(arg)) - except: - # If all else fails, pass through unchanged - validated_args.append(arg) - - results = func(*validated_args, **converted_kwargs) - - # Convert results back to original input types - import pandas as pd - - if isinstance(results, pd.DataFrame): - if original_object is not None: - if isinstance(original_object, list): - return results.values.tolist() - elif isinstance(original_object, np.ndarray): - return results.values - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Tensor" - ): - import torch - - return torch.tensor(results.values) - elif isinstance(original_object, pd.Series): - return ( - pd.Series(results.iloc[:, 0]) - if results.shape[1] > 0 - else pd.Series() - ) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "DataArray" - ): - import xarray as xr - - return xr.DataArray(results.values) - return results - - return results - - # Mark as a wrapper for detection - wrapper._is_wrapper = True - wrapper._decorator_type = "pandas_fn" - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_preserve_doc.py b/src/scitex/decorators/_preserve_doc.py deleted file mode 100755 index f5e3e977..00000000 --- a/src/scitex/decorators/_preserve_doc.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# File: ./scitex_repo/src/scitex/decorators/_preserve_doc.py - -from functools import wraps - - -def preserve_doc(loader_func): - """Wrap the loader functions to preserve their docstrings""" - - @wraps(loader_func) - def wrapper(*args, **kwargs): - return loader_func(*args, **kwargs) - - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_signal_fn.py b/src/scitex/decorators/_signal_fn.py deleted file 
from functools import wraps
from typing import Any as _Any
from typing import Callable

import numpy as np


def signal_fn(func: Callable) -> Callable:
    """Decorator converting only the first argument (the signal) to torch.

    Designed for DSP functions where:
    - The first argument is the signal data and is converted to a torch
      tensor via ``to_torch``.
    - All other arguments (sampling frequency, bands, etc.) are passed
      through unchanged.

    Tensor results are converted back to the type of the original first
    argument (list, ndarray, DataFrame, Series, DataArray); tuple
    results have each tensor element converted back to ndarray when the
    original input was an ndarray.
    """

    @wraps(func)
    def wrapper(*args: _Any, **kwargs: _Any) -> _Any:
        # Lazy import keeps this module importable standalone and matches
        # the file's lazy-import convention.
        from ._converters import _return_always, is_nested_decorator, to_torch

        # Skip conversion when already inside another type decorator.
        if is_nested_decorator():
            return func(*args, **kwargs)

        # Mark this frame so nested decorators can detect us.
        wrapper._current_decorator = "signal_fn"

        # First positional argument drives output-type restoration.
        original_object = args[0] if args else None

        if args:
            # Convert only the signal; keep the remaining args untouched.
            signal = to_torch(args[0], return_fn=_return_always)[0][0]
            converted_args = (signal,) + args[1:]
        else:
            converted_args = args

        results = func(*converted_args, **kwargs)

        import torch  # imported once; shared by both result branches

        if isinstance(results, torch.Tensor):
            if original_object is not None:
                cls_name = type(original_object).__name__
                if isinstance(original_object, list):
                    return results.detach().cpu().numpy().tolist()
                if isinstance(original_object, np.ndarray):
                    return results.detach().cpu().numpy()
                if cls_name == "DataFrame":
                    import pandas as pd

                    return pd.DataFrame(results.detach().cpu().numpy())
                if cls_name == "Series":
                    import pandas as pd

                    return pd.Series(results.detach().cpu().numpy().flatten())
                if cls_name == "DataArray":
                    import xarray as xr

                    return xr.DataArray(results.detach().cpu().numpy())
            return results

        # Handle tuple returns (e.g. (signal, frequencies)).
        if isinstance(results, tuple):
            restored = []
            for item in results:
                if isinstance(item, torch.Tensor) and isinstance(
                    original_object, np.ndarray
                ):
                    restored.append(item.detach().cpu().numpy())
                else:
                    restored.append(item)
            return tuple(restored)

        return results

    # Mark as a wrapper for detection by is_nested_decorator().
    wrapper._is_wrapper = True
    wrapper._decorator_type = "signal_fn"
    return wrapper


# EOF
- -### Lifecycle and Metadata -- [lifecycle.md](lifecycle.md) — `timeout`, `deprecated`, `not_implemented`, `preserve_doc`, `wrap`: timeout protection via child process, deprecation forwarding, not-implemented stubs, and docstring preservation helpers. - -## Quick Reference - -```python -import scitex as stx - -# --- Type conversion --- -@stx.decorators.numpy_fn -def compute(arr): # always np.ndarray inside - return arr.mean(axis=0) - -@stx.decorators.torch_fn -def model_step(x, dim=0): # always torch.Tensor, auto-CUDA - return x.mean(dim=dim) - -@stx.decorators.pandas_fn -def describe(df): # always pd.DataFrame inside - return df.describe() - -@stx.decorators.signal_fn -def bandpass(signal, fs, low_hz, high_hz): - # only signal (first arg) converted; fs etc. stay as scalars - ... - -# --- Batch processing --- -@stx.decorators.batch_fn -def process(x, scale=1.0): - return x * scale - -result = process(big_array, scale=2.0, batch_size=64) - -# --- Combined: type conversion + batching --- -@stx.decorators.torch_batch_fn -def forward(x): - return x.mean() - -# --- Auto-order (decorator order no longer matters) --- -stx.decorators.enable_auto_order() - -@stx.decorators.batch_fn -@stx.decorators.torch_fn # order irrelevant — auto-corrected at first call -def func(x): - return x.mean() - -# --- Caching --- -@stx.decorators.cache_disk -def expensive(x): # persisted to disk via joblib - return slow_compute(x) - -@stx.decorators.cache_disk_async -async def fetch(url): # async version, cached to disk - ... 
- -@stx.decorators.cache_mem -def fast_helper(n: int): # in-memory LRU, unbounded - return compute(n) - -# --- Lifecycle --- -@stx.decorators.timeout(seconds=30) -def slow_io(): - return fetch_data() - -@stx.decorators.deprecated(reason="Use new_api().", forward_to="mymod.new_api") -def old_api(*args, **kwargs): - pass - -@stx.decorators.not_implemented -def future_feature(): - pass -``` - -## All Exported Names - -``` -AutoOrderDecorator ConversionWarning batch_fn -batch_numpy_fn batch_pandas_fn batch_torch_fn -cache_disk cache_disk_async cache_mem -deprecated disable_auto_order enable_auto_order -is_cuda is_nested_decorator is_torch -not_implemented numpy_batch_fn numpy_fn -pandas_batch_fn pandas_fn preserve_doc -session signal_fn timeout -to_numpy to_torch torch_batch_fn -torch_fn wrap xarray_fn -``` - -(`session` is a lazy re-export of `scitex.session` to avoid circular imports.) diff --git a/src/scitex/decorators/_skills/batch-processing.md b/src/scitex/decorators/_skills/batch-processing.md deleted file mode 100644 index f781a94f..00000000 --- a/src/scitex/decorators/_skills/batch-processing.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -description: batch_fn decorator and combined batch+type-conversion decorators for splitting large inputs into mini-batches and combining results. ---- - -# Batch Processing Decorators - -## `batch_fn` - -Splits the first positional argument along its length into fixed-size chunks, runs the function on each chunk, then concatenates results. 
- -```python -from scitex.decorators import batch_fn - -@batch_fn -def process(x, scale=1.0): - return x * scale # receives a slice of x each iteration - -result = process(large_array, scale=2.0, batch_size=32) -``` - -**Signature:** `batch_fn(func: Callable) -> Callable` - -The decorated function accepts one extra keyword argument: - -| kwarg | type | default | description | -|---|---|---|---| -| `batch_size` | `int` | `4` | number of samples per batch | - -`batch_size` is popped from kwargs before being forwarded to `func` (unless `func` explicitly accepts `batch_size` in its signature, in which case it is forwarded). - -### Batching Behavior - -- If `len(x) <= batch_size`, the function is called once with the whole input (no looping). -- Otherwise, iterates over `ceil(len(x) / batch_size)` batches using `tqdm` for progress display. -- Slicing: `x[start:end]` — works with any object that supports length and slicing (numpy arrays, torch tensors, lists, pandas DataFrames). - -### Result Combination - -| Batch result type | Combination method | -|---|---| -| `torch.Tensor` (0-D) | `torch.stack(results)` | -| `torch.Tensor` (n-D) | `torch.vstack(results)` | -| `np.ndarray` (0-D) | `np.array(results)` | -| `np.ndarray` (n-D) | `np.vstack(results)` | -| `tuple` of tensors/arrays | each element stacked individually | -| `tuple` with non-tensor elements | first batch's non-tensor elements reused | -| `int` or `float` | `np.array(results)` | -| `list` | concatenated via `sum(results, [])` | - -GPU tensors are moved to CPU (`.cpu()`) before collection to avoid OOM errors during accumulation. 
- -### Recommended Decorator Order - -When combining with a type-conversion decorator, apply `batch_fn` **outermost** (written first in the decorator stack) and the type decorator **innermost** (written last): - -```python -@batch_fn # outer — splits input -@torch_fn # inner — converts each batch to tensor -def my_func(x): - return x.mean() -``` - -This ensures each mini-batch is converted to the target type individually rather than trying to convert the full dataset at once. - ---- - -## Combined Decorators - -`_combined.py` ships pre-built combinations that enforce the correct order. Use these instead of stacking manually when you need both type conversion and batching: - -| Name | Equivalent to | Aliases | -|---|---|---| -| `torch_batch_fn` | `@torch_fn` + `@batch_fn` | `batch_torch_fn` | -| `numpy_batch_fn` | `@numpy_fn` + `@batch_fn` | `batch_numpy_fn` | -| `pandas_batch_fn` | `@pandas_fn` + `@batch_fn` | `batch_pandas_fn` | - -```python -from scitex.decorators import torch_batch_fn, numpy_batch_fn, pandas_batch_fn - -@torch_batch_fn -def model_forward(x, dim=None): - return x.mean(dim=dim) - -@numpy_batch_fn -def compute_stats(x, axis=None): - return x.mean(axis=axis) - -@pandas_batch_fn -def summarize(df): - return df.describe() -``` - -All three are equivalent to writing: - -```python -@wraps(func) -@torch_fn # or numpy_fn / pandas_fn -@batch_fn -def wrapper(*args, **kwargs): - return func(*args, **kwargs) -``` - ---- - -## Auto-Order System - -`_auto_order.py` provides `AutoOrderDecorator`, which makes decorator ordering irrelevant. When enabled, `batch_fn`, `torch_fn`, `numpy_fn`, and `pandas_fn` become `AutoOrderDecorator` instances that collect applied decorators and always apply them in the correct fixed order on first call. 
- -```python -from scitex.decorators import enable_auto_order, disable_auto_order - -enable_auto_order() - -# These two are now identical at runtime: -@batch_fn -@torch_fn -def func1(x): - return x.mean() - -@torch_fn -@batch_fn # written in wrong order — auto-corrected -def func2(x): - return x.mean() - -disable_auto_order() # restore original decorators -``` - -**Priority constants** (higher = applied first / innermost): - -| Decorator | Priority | -|---|---| -| `torch_fn` | 100 | -| `numpy_fn` | 100 | -| `pandas_fn` | 100 | -| `batch_fn` | 10 | - -Auto-ordering works via lazy application: decorators are collected into `func._pending_decorators` list, then sorted by priority and applied on the **first function call**, not at decoration time. After the first call `_pending_decorators` is replaced by `_final_func`. - -`enable_auto_order()` / `disable_auto_order()` mutate `scitex.decorators` module globals in-place. diff --git a/src/scitex/decorators/_skills/caching.md b/src/scitex/decorators/_skills/caching.md deleted file mode 100644 index ea2887e3..00000000 --- a/src/scitex/decorators/_skills/caching.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -description: Persistent disk caching (cache_disk, cache_disk_async) and in-memory LRU caching (cache_mem) decorators for expensive computations. ---- - -# Caching Decorators - -## `cache_disk` - -Persistent disk caching backed by `joblib.Memory`. Cache is keyed by a content hash of the function arguments, so repeated calls with the same inputs are served from disk. - -```python -from scitex.decorators import cache_disk - -@cache_disk -def expensive_compute(x, n_iter=100): - # heavy computation - return result -``` - -**Signature:** `cache_disk(func: Callable) -> Callable` - -No-argument decorator — applied directly without parentheses. - -**Cache location:** Reads from `scitex.config.get_paths().function_cache`. This resolves to the project's configured function-cache directory (see `stx.config`). 
The directory is created automatically by `joblib.Memory`. - -**Implementation detail:** Creates a new `joblib.Memory(cache_dir, verbose=0)` instance at decoration time. On each call, wraps `func` with `memory.cache(func)` and calls the cached version. Joblib handles serialization, cache invalidation, and sub-directory layout internally. - -**Invalidation:** There is no programmatic cache-clear API on the decorator itself. Clear the cache by deleting the directory at `get_paths().function_cache` or by using `joblib.Memory` directly. - ---- - -## `cache_disk_async` - -Same as `cache_disk` but for `async def` functions. The async function is wrapped in a synchronous executor so joblib (which is synchronous) can cache it. - -```python -from scitex.decorators import cache_disk_async - -@cache_disk_async -async def fetch_remote_data(url: str): - async with aiohttp.ClientSession() as session: - ... - return data -``` - -**Signature:** `cache_disk_async(func: Callable) -> Callable` - -No-argument decorator. - -**Mechanism:** -1. A sync wrapper (`sync_wrapper`) calls `asyncio.run(func(...))` to execute the async function synchronously. -2. `sync_wrapper` is cached by `joblib.Memory`. -3. The outer `async_wrapper` (returned to the caller) runs the cached sync version inside `loop.run_in_executor(None, ...)` to avoid blocking the event loop. - -**Limitation:** `asyncio.run()` inside the sync wrapper means this will fail if called from within an already-running event loop (e.g., Jupyter notebooks with existing loops). In that context use `cache_disk` with a synchronous wrapper instead. - -**Cache location:** Same as `cache_disk` — `scitex.config.get_paths().function_cache`. - ---- - -## `cache_mem` - -In-memory LRU (Least Recently Used) cache. A direct alias for Python's standard-library `functools.lru_cache(maxsize=None)`. 
- -```python -from scitex.decorators import cache_mem - -@cache_mem -def compute(x: int, y: int) -> float: - return heavy_math(x, y) -``` - -**Signature:** `cache_mem` is an alias for `functools.lru_cache(maxsize=None)` — unbounded cache. - -**Requirements:** All arguments must be hashable (same constraint as `functools.lru_cache`). NumPy arrays, pandas DataFrames, and torch tensors are not hashable and will raise `TypeError`. Convert to tuples or use `cache_disk` for array-valued inputs. - -**Cache introspection:** - -```python -# View cache statistics -compute.cache_info() -# CacheInfo(hits=3, misses=5, maxsize=None, currsize=5) - -# Clear the cache manually -compute.cache_clear() -``` - -**Lifetime:** Cache lives as long as the decorated function object exists (i.e., for the process lifetime). Data is not persisted across restarts. - ---- - -## Choosing Between Cache Types - -| Scenario | Recommendation | -|---|---| -| Heavy CPU computation, array inputs | `cache_disk` — survives process restarts | -| Fast helper with hashable scalar args | `cache_mem` — zero overhead, no I/O | -| Async I/O fetches (network, DB) | `cache_disk_async` — avoids redundant remote calls | -| Need cache invalidation control | `cache_disk` — delete cache directory to invalidate | -| Jupyter or interactive use | `cache_mem` — `cache_disk_async` has event-loop limitations | diff --git a/src/scitex/decorators/_skills/lifecycle.md b/src/scitex/decorators/_skills/lifecycle.md deleted file mode 100644 index e3ffd34a..00000000 --- a/src/scitex/decorators/_skills/lifecycle.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Decorators for function lifecycle management: timeout protection, deprecation warnings, not-implemented stubs, docstring preservation, and basic wrapping. ---- - -# Lifecycle and Metadata Decorators - -## `timeout` - -Runs a function in a child process and raises `TimeoutError` if it does not complete within the given number of seconds. 
- -```python -from scitex.decorators import timeout - -@timeout(seconds=30) -def fetch_data(url: str): - return requests.get(url).json() - -@timeout(seconds=5, error_message="Database query took too long") -def slow_query(sql: str): - return db.execute(sql) -``` - -**Signature:** `timeout(seconds: int = 10, error_message: str = "Timeout") -> decorator` - -Parametric decorator — called with parentheses. - -**Mechanism:** Uses `multiprocessing.Process` and `multiprocessing.Queue`. -1. Spawns a child process that calls the function and puts the result in a `Queue`. -2. The parent joins the child with `join(timeout=seconds)`. -3. If the child is still alive after the timeout, `process.terminate()` is called and `TimeoutError(error_message)` is raised. -4. Otherwise, the result is retrieved from the queue with `queue.get()`. - -**Caveats:** -- Spawned process means function arguments and return values must be picklable. -- Does not work with lambda functions or locally-defined classes that cannot be pickled. -- On Windows, the spawn start method requires the call to be inside `if __name__ == "__main__":`. - ---- - -## `deprecated` - -Emits a `DeprecationWarning` when the decorated function is called. Optionally forwards all calls to a new function. - -```python -from scitex.decorators import deprecated - -# Simple warning only -@deprecated(reason="Use new_func() instead.") -def old_func(x): - return x * 2 - -# Warning + automatic call forwarding -@deprecated( - reason="Moved to scitex.session module.", - forward_to="scitex.session.start" -) -def start_session(*args, **kwargs): - pass # body never executes when forwarding succeeds -``` - -**Signature:** `deprecated(reason: str = None, forward_to: str = None) -> decorator` - -Parametric decorator — called with parentheses. - -**Parameters:** -| param | type | description | -|---|---|---| -| `reason` | `str` | Human-readable deprecation message included in the warning. 
| -| `forward_to` | `str` | Dotted module path to forward calls to (e.g., `"scitex.io.save"`). Supports relative notation: `"..session.start"` resolves relative to the decorated function's module. | - -**Behavior with `forward_to`:** -1. Emits `DeprecationWarning` with `reason`. -2. Dynamically imports the module and retrieves the function via `importlib.import_module` + `getattr`. -3. Calls the target function with the same `*args` and `**kwargs`. -4. If import or attribute lookup fails, emits a `RuntimeWarning` and falls back to the original deprecated function body. -5. Auto-generates a docstring that combines the deprecation notice with the target function's docstring (if available). - -**Behavior without `forward_to`:** -1. Emits `DeprecationWarning` with `reason`. -2. Executes the original function body normally. - ---- - -## `not_implemented` - -Marks a function as not yet implemented. When called, emits a `FutureWarning` and returns `None` (does not raise an exception and does not execute the function body). - -```python -from scitex.decorators import not_implemented - -@not_implemented -def future_feature(x, y): - # This body is never executed - pass -``` - -**Signature:** `not_implemented(func: Callable) -> Callable` - -No-argument decorator. - -**Warning category:** `FutureWarning` (not `NotImplementedError` — the call silently returns `None`). - -**Message format:** `"Attempt to use unimplemented method: ''. This method is not yet available."` - -Use case: placeholder stubs in public APIs that should exist in the interface but are not yet coded. Allows code that calls the function to continue without crashing. - ---- - -## `preserve_doc` - -Wraps a function while explicitly preserving its docstring using `functools.wraps`. 
- -```python -from scitex.decorators import preserve_doc - -@preserve_doc -def load_csv(path: str): - """Load a CSV file and return a DataFrame.""" - return pd.read_csv(path) -``` - -**Signature:** `preserve_doc(loader_func: Callable) -> Callable` - -No-argument decorator. - -Functionally equivalent to applying `@functools.wraps` manually. Intended for documentation tooling pipelines where an explicit named decorator makes the intent clearer in source code. - ---- - -## `wrap` - -A minimal wrapper template that preserves function metadata and exposes the original function reference. - -```python -from scitex.decorators import wrap - -@wrap -def my_function(x): - return x + 1 - -# Access original -my_function._original_func # the unwrapped function -my_function._is_wrapper # True -``` - -**Signature:** `wrap(func: Callable) -> Callable` - -No-argument decorator. - -Sets two attributes on the wrapper: -- `_original_func` — reference to the unwrapped function. -- `_is_wrapper = True` — flag for decorator-stack inspection. - -Use case: template for building custom decorators, or when you need to mark a function as wrapped for downstream introspection without changing behavior. diff --git a/src/scitex/decorators/_skills/type-conversion.md b/src/scitex/decorators/_skills/type-conversion.md deleted file mode 100644 index 56a17efc..00000000 --- a/src/scitex/decorators/_skills/type-conversion.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -description: Automatic input-type conversion decorators (numpy_fn, torch_fn, pandas_fn, xarray_fn, signal_fn) that convert function inputs to a target array type and convert outputs back to the original caller type. ---- - -# Type Conversion Decorators - -These decorators convert function inputs to a specific array type, run the function, then convert the output back to whatever type the caller passed in. Each decorator is a no-argument decorator applied directly to a function definition. 
- -## Decorators - -### `numpy_fn` - -```python -from scitex.decorators import numpy_fn - -@numpy_fn -def my_func(arr): - # arr is guaranteed to be np.ndarray here - return arr.mean(axis=0) -``` - -**Signature:** `numpy_fn(func: Callable) -> Callable` - -**Input conversion (all positional args):** -| Caller type | Converted to | -|---|---| -| `list` | `np.ndarray` | -| `torch.Tensor` | `np.ndarray` via `.detach().cpu().numpy()` | -| `pd.DataFrame` | `np.ndarray` via `.to_numpy().squeeze()` | -| `pd.Series` | `np.ndarray` via `.to_numpy().squeeze()` | -| `xr.DataArray` | `np.ndarray` via `np.array(data)` | -| `int`, `float`, `str`, `None` | passed through unchanged | -| `tuple`/`list` of `int` | passed through (dimension tuples) | - -**Output conversion** — when result is `np.ndarray` and first arg was: -| Original type | Output type | -|---|---| -| `list` | `list` via `.tolist()` | -| `torch.Tensor` | `torch.Tensor` via `torch.tensor(result)` | -| `pd.DataFrame` | `pd.DataFrame` | -| `pd.Series` | `pd.Series` | -| `np.ndarray` | `np.ndarray` (unchanged) | - -**`axis`/`dim` parameter translation:** `dim` kwargs are renamed to `axis` so numpy functions receive the correct parameter name. - -**Nested decorator guard:** Uses `is_nested_decorator()` stack inspection. If already inside a type-conversion wrapper, conversion is skipped to avoid double-conversion. - ---- - -### `torch_fn` - -```python -from scitex.decorators import torch_fn - -@torch_fn -def my_func(tensor, dim=None): - # tensor is guaranteed to be torch.Tensor here - return tensor.mean(dim=dim) -``` - -**Signature:** `torch_fn(func: Callable) -> Callable` - -**Input conversion:** same types as `numpy_fn` but targets `torch.Tensor`. - -- Automatically moves tensors to `cuda` when `torch.cuda.is_available()` is True. -- Emits `ConversionWarning` (via `_converters.ConversionWarning`) once per unique conversion (LRU-cached) when CUDA is used. 
-- `axis` kwargs are renamed to `dim` so torch functions receive the correct parameter name. -- Preserves `tuple`/`list` of integers (e.g., `dim=(0, 1)`) unchanged. - -**Output conversion** — when result is `torch.Tensor` and first arg was: -| Original type | Output type | -|---|---| -| `list` | `list` via `.detach().cpu().numpy().tolist()` | -| `np.ndarray` | `np.ndarray` via `.detach().cpu().numpy()` | -| `pd.DataFrame` | `pd.DataFrame` | -| `pd.Series` | `pd.Series` | -| `xr.DataArray` | `xr.DataArray` | -| `torch.Tensor` | `torch.Tensor` (unchanged) | - ---- - -### `pandas_fn` - -```python -from scitex.decorators import pandas_fn - -@pandas_fn -def my_func(df): - # df is guaranteed to be pd.DataFrame here - return df.describe() -``` - -**Signature:** `pandas_fn(func: Callable) -> Callable` - -**Input conversion targets `pd.DataFrame`:** -| Caller type | Converted to | -|---|---| -| `pd.Series` | `pd.DataFrame(series)` | -| `np.ndarray` | `pd.DataFrame(array)` | -| `list` | `pd.DataFrame(list)` (best-effort) | -| `torch.Tensor` | `pd.DataFrame(tensor.detach().cpu().numpy())` | -| `xr.DataArray` | `pd.DataFrame(data.values)` | -| `int`, `float`, `str` | passed through unchanged (scalars not wrapped) | - -**Output conversion** — when result is `pd.DataFrame` and first arg was: -| Original type | Output type | -|---|---| -| `list` | `list` via `.values.tolist()` | -| `np.ndarray` | `np.ndarray` via `.values` | -| `torch.Tensor` | `torch.Tensor` via `torch.tensor(result.values)` | -| `pd.Series` | `pd.Series` (first column) | -| `xr.DataArray` | `xr.DataArray(result.values)` | -| `pd.DataFrame` | `pd.DataFrame` (unchanged) | - ---- - -### `xarray_fn` - -```python -from scitex.decorators import xarray_fn - -@xarray_fn -def my_func(da): - # da is guaranteed to be xr.DataArray here - return da.mean(dim="time") -``` - -**Signature:** `xarray_fn(func: Callable) -> Callable` - -**Input conversion targets `xr.DataArray`.** - -**Strict assertion:** Unlike the other 
converters, `xarray_fn` asserts every positional arg is an `xr.DataArray` after conversion. Any unconvertible argument raises `AssertionError`. - -**Output conversion** — when result is `xr.DataArray` and first arg was: -| Original type | Output type | -|---|---| -| `list` | `list` via `.values.tolist()` | -| `np.ndarray` | `np.ndarray` via `.values` | -| `torch.Tensor` | `torch.Tensor` | -| `pd.DataFrame` | `pd.DataFrame(result.values)` | -| `pd.Series` | `pd.Series(result.values.flatten())` | - ---- - -### `signal_fn` - -```python -from scitex.decorators import signal_fn - -@signal_fn -def bandpass(signal, fs, low_hz, high_hz): - # Only `signal` (first arg) is converted to torch.Tensor. - # fs, low_hz, high_hz remain as Python scalars. - ... - return filtered # torch.Tensor or tuple of tensors -``` - -**Signature:** `signal_fn(func: Callable) -> Callable` - -`signal_fn` is a variant of `torch_fn` designed for DSP functions where: -- Only **the first argument** (the signal array) is converted to `torch.Tensor`. -- All remaining arguments (`fs`, `bands`, threshold values, etc.) are passed through **unchanged** as Python scalars or lists. - -**Output conversion** — supports both single-tensor and tuple returns: -- Single `torch.Tensor` — converted back to caller's original type. -- `tuple` — each tensor element is converted back individually; non-tensor elements pass through. 
- ---- - -## Converter Utilities - -Standalone helpers from `scitex.decorators._converters` (also exported from `stx.decorators`): - -```python -from scitex.decorators import to_numpy, to_torch, is_torch, is_cuda, ConversionWarning - -# Check types -is_torch(arr) # True if any arg is torch.Tensor -is_cuda(arr) # True if any arg is a CUDA tensor - -# Convert data -arr_np = to_numpy(tensor) # returns np.ndarray -arr_t = to_torch(arr) # returns torch.Tensor (auto-device) -``` - -`to_torch` signature: -```python -to_torch(*args, return_fn=_return_if, device=None, **kwargs) -``` -- `device` defaults to `"cuda"` if available, else `"cpu"`. -- `axis` kwargs are renamed to `dim` automatically. - -`to_numpy` signature: -```python -to_numpy(*args, return_fn=_return_if, **kwargs) -``` -- `dim` kwargs are renamed to `axis` automatically. - -`is_nested_decorator()` — inspects the call stack for nested `wrapper` frames with `_current_decorator` locals to detect multi-decorator stacking and prevent double-conversion. diff --git a/src/scitex/decorators/_timeout.py b/src/scitex/decorators/_timeout.py deleted file mode 100755 index 1e209417..00000000 --- a/src/scitex/decorators/_timeout.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# File: ./scitex_repo/src/scitex/decorators/_timeout.py - -#!./env/bin/python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2024-04-23 19:11:33" -# Author: Yusuke Watanabe (ywatanabe@scitex.ai) - -""" -This script does XYZ. 
-""" - -""" -Imports -""" - - -""" -Config -""" -# CONFIG = scitex.gen.load_configs() - -""" -Functions & Classes -""" -from multiprocessing import Process, Queue - - -def timeout(seconds=10, error_message="Timeout"): - def decorator(func): - def wrapper(*args, **kwargs): - def queue_wrapper(queue, args, kwargs): - result = func(*args, **kwargs) - queue.put(result) - - queue = Queue() - args_for_process = (queue, args, kwargs) - process = Process(target=queue_wrapper, args=args_for_process) - process.start() - process.join(timeout=seconds) - - if process.is_alive(): - process.terminate() - raise TimeoutError(error_message) - else: - return queue.get() - - return wrapper - - return decorator - - -# EOF diff --git a/src/scitex/decorators/_torch_fn.py b/src/scitex/decorators/_torch_fn.py deleted file mode 100755 index 392ae0da..00000000 --- a/src/scitex/decorators/_torch_fn.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-30 15:40:43 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_torch_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_torch_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -from functools import wraps -from typing import Any as _Any -from typing import Callable - -import numpy as np - -from ._converters import _return_always, is_nested_decorator, to_torch - - -def torch_fn(func: Callable) -> Callable: - """Decorator for PyTorch function compatibility. - - Automatically converts inputs to PyTorch tensors and handles various data types - gracefully. Preserves the original input type in the output. 
- - Features - -------- - - Converts inputs to PyTorch tensors - - Preserves scalar parameters (int, float, bool, str) - - Preserves dimension tuples like dim=(0, 1) - - Handles nested lists/tuples gracefully - - Automatically converts axis to dim for torch functions - - Applies device="cuda" if available - - Returns output in same type as input (numpy->numpy, pandas->pandas, etc.) - - Parameters - ---------- - func : Callable - The function to decorate - - Returns - ------- - Callable - The decorated function - - Examples - -------- - >>> @torch_fn - ... def mean_squared(x, dim=None): - ... return (x ** 2).mean(dim=dim) - >>> - >>> # Works with numpy arrays - >>> result = mean_squared(np.array([1, 2, 3])) - >>> - >>> # Works with nested lists - >>> result = mean_squared([[1, 2], [3, 4]]) - >>> - >>> # Preserves dimension tuples - >>> result = mean_squared(data, dim=(0, 1)) - - Notes - ----- - For optimal performance with batch processing, apply torch_fn before batch_fn: - @batch_fn - @torch_fn - def my_function(x): ... - - Or use auto-ordering to handle this automatically. 
- """ - - @wraps(func) - def wrapper(*args: _Any, **kwargs: _Any) -> _Any: - # Skip conversion if already in a nested decorator context - if is_nested_decorator(): - results = func(*args, **kwargs) - return results - - # Set the current decorator context - wrapper._current_decorator = "torch_fn" - - # Store original object for type preservation - original_object = args[0] if args else None - - converted_args, converted_kwargs = to_torch( - *args, return_fn=_return_always, **kwargs - ) - - # Skip strict assertion for certain types that may not convert to tensors - # Instead, convert what we can and pass through what we can't - import torch - - validated_args = [] - for arg_index, arg in enumerate(converted_args): - if isinstance(arg, torch.Tensor): - validated_args.append(arg) - elif isinstance(arg, (int, float, str, type(None))): - # Pass through scalars and strings unchanged - validated_args.append(arg) - elif isinstance(arg, list) and all( - isinstance(item, torch.Tensor) for item in arg - ): - # List of tensors - pass through as is - validated_args.append(arg) - else: - # Try one more conversion attempt - try: - validated_args.append(torch.tensor(arg).float()) - except: - # If all else fails, pass through unchanged - validated_args.append(arg) - - results = func(*validated_args, **converted_kwargs) - - # Convert results back to original input types - import torch - - if isinstance(results, torch.Tensor): - if original_object is not None: - if isinstance(original_object, list): - return results.detach().cpu().numpy().tolist() - elif isinstance(original_object, np.ndarray): - return results.detach().cpu().numpy() - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "DataFrame" - ): - import pandas as pd - - return pd.DataFrame(results.detach().cpu().numpy()) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Series" - ): - import pandas as pd - - return 
pd.Series(results.detach().cpu().numpy().flatten()) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "DataArray" - ): - import xarray as xr - - return xr.DataArray(results.detach().cpu().numpy()) - return results - - return results - - # Mark as a wrapper for detection - wrapper._is_wrapper = True - wrapper._decorator_type = "torch_fn" - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_wrap.py b/src/scitex/decorators/_wrap.py deleted file mode 100755 index 8f26e0be..00000000 --- a/src/scitex/decorators/_wrap.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-05-01 09:16:13 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_wrap.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_wrap.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - - -def wrap(func): - """Basic function wrapper that preserves function metadata. - Usage: - @wrap - def my_function(x): - return x + 1 - # Or manually: - def my_function(x): - return x + 1 - wrapped_func = wrap(my_function) - This wrapper is useful as a template for creating more complex decorators - or when you want to ensure function metadata is preserved. 
- """ - import functools - - @functools.wraps(func) - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - # Store reference to original function - wrapper._original_func = func - # Mark as a wrapper for detection - wrapper._is_wrapper = True - return wrapper - - -# EOF diff --git a/src/scitex/decorators/_xarray_fn.py b/src/scitex/decorators/_xarray_fn.py deleted file mode 100755 index 6de685df..00000000 --- a/src/scitex/decorators/_xarray_fn.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-30 15:41:19 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_xarray_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./src/scitex/decorators/_xarray_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- -from functools import wraps -from typing import Any as _Any -from typing import Callable - -import numpy as np - -from ._converters import is_nested_decorator - - -def xarray_fn(func: Callable) -> Callable: - @wraps(func) - def wrapper(*args: _Any, **kwargs: _Any) -> _Any: - # Skip conversion if already in a nested decorator context - if is_nested_decorator(): - results = func(*args, **kwargs) - return results - - # Set the current decorator context - wrapper._current_decorator = "xarray_fn" - - # Store original object for type preservation - original_object = args[0] if args else None - - # Convert args to xarray DataArrays - def to_xarray(data): - import pandas as pd - import torch - import xarray as xr - - if isinstance(data, xr.DataArray): - return data - elif isinstance(data, np.ndarray): - return xr.DataArray(data) - elif isinstance(data, list): - return xr.DataArray(data) - elif hasattr(data, "__class__") and data.__class__.__name__ == "Tensor": - return xr.DataArray(data.detach().cpu().numpy()) - elif hasattr(data, "__class__") and data.__class__.__name__ == "DataFrame": - return xr.DataArray(data.values) - elif 
hasattr(data, "__class__") and data.__class__.__name__ == "Series": - return xr.DataArray(data.values) - else: - return xr.DataArray([data]) - - converted_args = [to_xarray(arg) for arg in args] - converted_kwargs = {k: to_xarray(v) for k, v in kwargs.items()} - - # Assertion to ensure all args are converted to xarray DataArrays - import xarray as xr - - for arg_index, arg in enumerate(converted_args): - assert isinstance( - arg, xr.DataArray - ), f"Argument {arg_index} not converted to DataArray: {type(arg)}" - - results = func(*converted_args, **converted_kwargs) - - # Convert results back to original input types - import xarray as xr - - if isinstance(results, xr.DataArray): - if original_object is not None: - if isinstance(original_object, list): - return results.values.tolist() - elif isinstance(original_object, np.ndarray): - return results.values - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Tensor" - ): - import torch - - return torch.tensor(results.values) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "DataFrame" - ): - import pandas as pd - - return pd.DataFrame(results.values) - elif ( - hasattr(original_object, "__class__") - and original_object.__class__.__name__ == "Series" - ): - import pandas as pd - - return pd.Series(results.values.flatten()) - return results - - return results - - # Mark as a wrapper for detection - wrapper._is_wrapper = True - wrapper._decorator_type = "xarray_fn" - return wrapper - - -# EOF diff --git a/tests/scitex/decorators/test__auto_order.py b/tests/scitex/decorators/test__auto_order.py deleted file mode 100644 index 516cdd24..00000000 --- a/tests/scitex/decorators/test__auto_order.py +++ /dev/null @@ -1,398 +0,0 @@ -#!/usr/bin/env python3 -# Time-stamp: "2025-06-01 10:45:00 (ywatanabe)" -# File: ./scitex_repo/tests/scitex/decorators/test__auto_order.py - -"""Test auto-ordering decorator system""" - -import numpy as np -import 
pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -torch = pytest.importorskip("torch") -pd = pytest.importorskip("pandas") - -import scitex.decorators -from scitex.decorators import ( - batch_fn, - disable_auto_order, - enable_auto_order, - numpy_fn, - pandas_fn, - torch_fn, -) - - -class TestAutoOrder: - """Test auto-ordering functionality""" - - def setup_method(self): - """Reset to original decorators before each test""" - disable_auto_order() - - def teardown_method(self): - """Reset to original decorators after each test""" - disable_auto_order() - - def test_enable_disable(self): - """Test enable and disable functionality""" - # Enable auto-ordering - enable_auto_order() - - # Check that decorators were replaced - assert scitex.decorators.torch_fn.__class__.__name__ == "AutoOrderDecorator" - assert scitex.decorators.numpy_fn.__class__.__name__ == "AutoOrderDecorator" - assert scitex.decorators.pandas_fn.__class__.__name__ == "AutoOrderDecorator" - assert scitex.decorators.batch_fn.__class__.__name__ == "AutoOrderDecorator" - - # Disable auto-ordering - disable_auto_order() - - # Check that original decorators were restored - assert scitex.decorators.torch_fn.__name__ == "torch_fn" - assert scitex.decorators.numpy_fn.__name__ == "numpy_fn" - assert scitex.decorators.pandas_fn.__name__ == "pandas_fn" - assert scitex.decorators.batch_fn.__name__ == "batch_fn" - - def test_auto_ordering_torch_batch(self): - """Test that decorators are applied in correct order regardless of how written""" - enable_auto_order() - - # Must use scitex.decorators.* after enable_auto_order() to get auto-ordering versions - # Define functions with different decorator orders - @scitex.decorators.batch_fn - @scitex.decorators.torch_fn - def func1(x): - return x.mean() - - @scitex.decorators.torch_fn - @scitex.decorators.batch_fn - def func2(x): - return x.mean() - - # Both should work identically - data = np.random.randn(10, 5) - 
result1 = func1(data) - result2 = func2(data) - - # Results should be the same (both are numpy arrays due to input type) - np.testing.assert_allclose(result1, result2) - - def test_multiple_type_converters(self): - """Test handling of multiple type converters""" - enable_auto_order() - - @scitex.decorators.batch_fn - @scitex.decorators.numpy_fn - @scitex.decorators.torch_fn - def func(x): - # Should work with torch tensor input - return x.mean() - - # Test with torch tensor - data = torch.randn(10, 5) - result = func(data) - # With auto-ordering, the decorators are reordered, but the output type - # depends on the input type. Since input is torch, output is torch - assert isinstance(result, (torch.Tensor, np.ndarray, np.floating, float)) - - def test_complex_decorator_stacking(self): - """Test complex decorator stacking scenarios""" - enable_auto_order() - - @scitex.decorators.pandas_fn - @scitex.decorators.torch_fn - def complex_func(x): - # This would normally be problematic, but auto-ordering handles it - # Need to handle CUDA tensor - if isinstance(x, torch.Tensor) and x.is_cuda: - x = x.cpu() - return pd.Series(x.flatten()) - - # Test with numpy data to avoid CUDA issues - data = np.random.randn(8, 5) # 8 divides evenly into batches - result = complex_func(data) - assert isinstance(result, pd.Series) - - def test_delayed_application(self): - """Test that decorators are applied lazily on first call""" - enable_auto_order() - - call_count = 0 - - # Must use scitex.decorators.* after enable_auto_order() for auto-ordering - @scitex.decorators.batch_fn - @scitex.decorators.torch_fn - def counting_func(x): - nonlocal call_count - call_count += 1 - return x.sum() - - # Function should have pending decorators (from AutoOrderDecorator) - assert hasattr(counting_func, "_pending_decorators") - - # First call applies decorators - data = np.array([1, 2, 3]) - result = counting_func(data) - - # After first call, pending decorators should be gone - assert not 
hasattr(counting_func, "_pending_decorators") - assert hasattr(counting_func, "_final_func") - - def test_preserves_function_metadata(self): - """Test that function metadata is preserved""" - enable_auto_order() - - @scitex.decorators.batch_fn - @scitex.decorators.torch_fn - def documented_func(x): - """This is a documented function""" - return x * 2 - - assert documented_func.__doc__ == "This is a documented function" - assert documented_func.__name__ == "documented_func" - - -class TestAutoOrderIntegration: - """Test auto-ordering with real use cases""" - - def setup_method(self): - """Enable auto-ordering for integration tests""" - enable_auto_order() - - def teardown_method(self): - """Disable after tests""" - disable_auto_order() - - def test_stats_describe_with_auto_order(self): - """Test that stats.describe works with auto-ordering""" - from scitex.stats import describe - - # Test case with multi-dimensional tensor - features_pac_z = np.random.randn(87, 5, 50, 30) - tensor_input = torch.tensor(features_pac_z) - - # This should work without errors - out = describe(tensor_input, dim=(1, 2, 3)) - - assert out[0].shape == (87, 7) - assert len(out[1]) == 7 - - def test_nested_lists_with_auto_order(self): - """Test nested list handling with auto-ordering""" - - @scitex.decorators.torch_fn - def process_nested(x): - return x.mean() - - # Nested lists should work - nested_data = [[1, 2, 3], [4, 5, 6]] - result = process_nested(nested_data) - - # Result will be numpy since input was a list - expected = np.array(nested_data).mean() - np.testing.assert_allclose(result, expected) - - def test_scalar_preservation_with_auto_order(self): - """Test that scalars are preserved with auto-ordering""" - - @scitex.decorators.torch_fn - def scale_tensor(x, scale=2.5): - assert isinstance(scale, float) - return x * scale - - data = torch.tensor([1, 2, 3]) - result = scale_tensor(data, scale=3.0) - - expected = data * 3.0 - assert torch.allclose(result, expected) - - -if __name__ == 
"__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_auto_order.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2025-06-01 10:30:00 (ywatanabe)" -# # File: ./scitex_repo/src/scitex/decorators/_auto_order.py -# -# """ -# Auto-ordering decorator system that enforces predefined order regardless of -# how decorators are written in code. -# -# The enforced order is: -# 1. Type conversion (innermost): torch_fn, numpy_fn, pandas_fn -# 2. Batch processing (outermost): batch_fn -# -# This uses a delayed application approach where decorators are collected -# and then applied in the correct order when the function is first called. -# -# Example -# ------- -# >>> from scitex.decorators import enable_auto_order -# >>> enable_auto_order() -# >>> -# >>> # These will all work identically: -# >>> @batch_fn -# >>> @torch_fn -# >>> def func1(x): -# ... return x.mean() -# >>> -# >>> @torch_fn -# >>> @batch_fn # Order doesn't matter! -# >>> def func2(x): -# ... return x.mean() -# -# The auto-ordering system eliminates decorator ordering complexity and -# prevents common errors from incorrect decorator stacking. 
-# """ -# -# from functools import wraps -# from typing import Callable, List, Tuple, Any -# -# # Import original decorators -# from ._torch_fn import torch_fn as _orig_torch_fn -# from ._numpy_fn import numpy_fn as _orig_numpy_fn -# from ._pandas_fn import pandas_fn as _orig_pandas_fn -# from ._batch_fn import batch_fn as _orig_batch_fn -# -# -# # Decorator priority (higher = inner/applied first) -# DECORATOR_PRIORITY = { -# "torch_fn": 100, -# "numpy_fn": 100, -# "pandas_fn": 100, -# "batch_fn": 10, -# } -# -# # Original decorator mapping -# ORIGINAL_DECORATORS = { -# "torch_fn": _orig_torch_fn, -# "numpy_fn": _orig_numpy_fn, -# "pandas_fn": _orig_pandas_fn, -# "batch_fn": _orig_batch_fn, -# } -# -# -# class AutoOrderDecorator: -# """Decorator that collects and applies decorators in predefined order.""" -# -# def __init__(self, name: str): -# self.name = name -# self.priority = DECORATOR_PRIORITY[name] -# self.original = ORIGINAL_DECORATORS[name] -# -# def __call__(self, func: Callable) -> Callable: -# # Initialize or get pending decorators list -# if not hasattr(func, "_pending_decorators"): -# # First decorator - create the wrapper -# original_func = func -# -# @wraps(func) -# def auto_ordered_wrapper(*args, **kwargs): -# # On first call, apply decorators in correct order -# if hasattr(auto_ordered_wrapper, "_pending_decorators"): -# # Sort by priority (descending = innermost first) -# decorators = sorted( -# auto_ordered_wrapper._pending_decorators, -# key=lambda x: x[1], -# reverse=True, -# ) -# -# # Apply decorators in order -# final_func = original_func -# for dec_name, _, dec_func in decorators: -# final_func = dec_func(final_func) -# -# # Replace this wrapper with the final decorated function -# auto_ordered_wrapper._final_func = final_func -# delattr(auto_ordered_wrapper, "_pending_decorators") -# -# # Call the final decorated function -# if hasattr(auto_ordered_wrapper, "_final_func"): -# return auto_ordered_wrapper._final_func(*args, **kwargs) -# else: 
-# return original_func(*args, **kwargs) -# -# auto_ordered_wrapper._pending_decorators = [] -# func = auto_ordered_wrapper -# -# # Add this decorator to pending list -# func._pending_decorators.append((self.name, self.priority, self.original)) -# -# return func -# -# -# # Create auto-ordering versions -# torch_fn = AutoOrderDecorator("torch_fn") -# numpy_fn = AutoOrderDecorator("numpy_fn") -# pandas_fn = AutoOrderDecorator("pandas_fn") -# batch_fn = AutoOrderDecorator("batch_fn") -# -# -# # Enable auto-ordering globally -# def enable_auto_order(): -# """ -# Enable auto-ordering for all decorators in the scitex.decorators module. -# -# This replaces the standard decorators with auto-ordering versions. -# -# Example -# ------- -# >>> import scitex -# >>> scitex.decorators.enable_auto_order() -# >>> -# >>> # Now decorators will auto-order regardless of how they're written -# >>> @scitex.decorators.batch_fn -# >>> @scitex.decorators.torch_fn -# >>> def my_func(x): -# ... return x.mean() -# """ -# import scitex.decorators as decorators_module -# -# # Replace with auto-ordering versions -# decorators_module.torch_fn = torch_fn -# decorators_module.numpy_fn = numpy_fn -# decorators_module.pandas_fn = pandas_fn -# decorators_module.batch_fn = batch_fn -# -# print("Auto-ordering enabled for scitex decorators!") -# print("Decorators will now apply in predefined order:") -# print(" 1. Type conversion (torch_fn, numpy_fn, pandas_fn)") -# print(" 2. Batch processing (batch_fn)") -# -# -# def disable_auto_order(): -# """Disable auto-ordering and restore original decorators.""" -# import scitex.decorators as decorators_module -# -# # Restore original decorators -# decorators_module.torch_fn = _orig_torch_fn -# decorators_module.numpy_fn = _orig_numpy_fn -# decorators_module.pandas_fn = _orig_pandas_fn -# decorators_module.batch_fn = _orig_batch_fn -# -# print("Auto-ordering disabled. 
Using original decorators.") -# -# -# __all__ = [ -# "torch_fn", -# "numpy_fn", -# "pandas_fn", -# "batch_fn", -# "enable_auto_order", -# "disable_auto_order", -# ] - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_auto_order.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__batch_fn.py b/tests/scitex/decorators/test__batch_fn.py deleted file mode 100644 index 75c4ef8f..00000000 --- a/tests/scitex/decorators/test__batch_fn.py +++ /dev/null @@ -1,484 +0,0 @@ -#!/usr/bin/env python3 -# Time-stamp: "2026-01-04 21:10:00 (ywatanabe)" -# File: ./tests/scitex/decorators/test__batch_fn.py - -"""Test batch_fn decorator functionality. - -The batch_fn decorator is designed for memory-efficient processing of large datasets. -It splits input data into batches, processes each batch independently, and combines -results by stacking. This is useful when data doesn't fit in memory all at once. 
- -Key behaviors: -- Splits data along axis 0 (first dimension) -- Processes each batch independently -- Combines results via vstack (concatenation along axis 0) -- Suitable for row-wise operations, NOT global aggregations -""" - -import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -torch = pytest.importorskip("torch") -pd = pytest.importorskip("pandas") - -from scitex.decorators import batch_fn, numpy_fn, torch_fn - - -class TestBatchFn: - """Test batch_fn decorator""" - - def test_basic_functionality(self): - """Test basic batch processing preserves data through batching""" - - @batch_fn - def double_rows(x, batch_size=4): - # Returns 2D array - each row doubled - return x * 2 - - # Use 12 rows with batch_size=3 for even batches (4 batches of 3) - data = np.random.randn(12, 5) - result = double_rows(data, batch_size=3) - expected = data * 2 - - np.testing.assert_allclose(result, expected) - - def test_default_batch_size(self): - """Test default batch_size=4""" - call_count = 0 - - @batch_fn - def count_calls(x, batch_size=4): - nonlocal call_count - call_count += 1 - return x # Identity - returns batch as-is - - data = np.arange(10).reshape(-1, 1) # 2D for vstack compatibility - result = count_calls(data) # No batch_size specified, uses default 4 - - # With 10 elements and batch_size=4, should have 3 calls (4+4+2) - assert call_count == 3 - np.testing.assert_array_equal(result.flatten(), data.flatten()) - - def test_batch_size_larger_than_data(self): - """Test batch_size larger than data processes all at once""" - call_count = 0 - - @batch_fn - def count_calls(x, batch_size=4): - nonlocal call_count - call_count += 1 - return x - - data = np.arange(5).reshape(-1, 1) - result = count_calls(data, batch_size=10) - - # Data smaller than batch_size, should only call once - assert call_count == 1 - np.testing.assert_array_equal(result.flatten(), data.flatten()) - - def 
test_batch_processing_produces_per_batch_results(self): - """Test that batch processing produces independent per-batch results""" - - @batch_fn - def batch_mean(x, batch_size=4): - # Return mean of each batch (scalar per batch) - return np.mean(x) - - data = np.arange(12).reshape(-1).astype(float) - result = batch_mean(data, batch_size=4) - - # With 12 elements split into 3 batches of 4: - # batch1: [0,1,2,3] mean = 1.5 - # batch2: [4,5,6,7] mean = 5.5 - # batch3: [8,9,10,11] mean = 9.5 - expected = np.array([1.5, 5.5, 9.5]) - np.testing.assert_allclose(result, expected) - - def test_tuple_results(self): - """Test handling of tuple results with 2D outputs""" - - @batch_fn - def transform_data(x, batch_size=4): - # Returns tuple of 2D arrays - return x * 2, x + 1 - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 5) - doubled, incremented = transform_data(data, batch_size=3) - - np.testing.assert_allclose(doubled, data * 2) - np.testing.assert_allclose(incremented, data + 1) - - def test_mixed_tuple_results(self): - """Test handling of tuples with mixed types (array + non-stackable)""" - - @batch_fn - def describe_rows(x, batch_size=4): - # Return 2D array and a list - transformed = x * 2 - labels = ["doubled"] # Non-stackable (list), same for all batches - return transformed, labels - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 5) - transformed, labels = describe_rows(data, batch_size=3) - - np.testing.assert_allclose(transformed, data * 2) - # Non-tensor elements use first batch value - assert labels == ["doubled"] - - def test_torch_tensor_2d_results(self): - """Test handling of torch tensor with 2D results""" - - @batch_fn - @torch_fn - def double_tensor(x, batch_size=4): - # Returns 2D tensor - preserves shape - return x * 2 - - # Use 12 rows with batch_size=3 for even batches - data = torch.randn(12, 5) - result = double_tensor(data, batch_size=3) - expected = data * 2 - - assert 
torch.allclose(result, expected) - - def test_torch_tensor_multidim_results(self): - """Test handling of torch tensor multidimensional results""" - - @batch_fn - @torch_fn - def reduce_to_2d(x, batch_size=4): - # Each input row (N, 5) -> reduced row (N, 2) - return x[:, :2] # Keep first 2 columns - - # Use 12 rows with batch_size=3 for even batches - data = torch.randn(12, 5) - result = reduce_to_2d(data, batch_size=3) - expected = data[:, :2] - - assert torch.allclose(result, expected) - - def test_parameter_compatibility(self): - """Test that batch_size is only passed to functions that accept it""" - - @batch_fn - def no_batch_param(x): - # This function doesn't accept batch_size - return x * 2 # Row-wise operation - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 3) - # Should work without error - result = no_batch_param(data, batch_size=3) - expected = data * 2 - - np.testing.assert_allclose(result, expected) - - def test_with_kwargs(self): - """Test batch processing with additional kwargs""" - - @batch_fn - def scale_rows(x, scale=1.0, batch_size=4): - return x * scale - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 5) - scale = 2.5 - - result = scale_rows(data, scale=scale, batch_size=3) - expected = data * scale - - np.testing.assert_allclose(result, expected) - - def test_empty_input(self): - """Test handling of empty input""" - - @batch_fn - def process(x, batch_size=4): - return x * 2 - - data = np.array([]) - result = process(data) - - assert len(result) == 0 - - def test_uneven_batches(self): - """Test handling of uneven batch sizes""" - batch_sizes_seen = [] - - @batch_fn - def track_batch_size(x, batch_size=4): - batch_sizes_seen.append(len(x)) - return x.reshape(-1, 1) # Ensure 2D for vstack - - data = np.arange(10) # 10 elements - result = track_batch_size(data, batch_size=4) - - # Should see batches of size 4, 4, 2 - assert batch_sizes_seen == [4, 4, 2] - 
np.testing.assert_array_equal(result.flatten(), data) - - def test_2d_array_processing(self): - """Test that 2D arrays are processed correctly row-by-row""" - - @batch_fn - def normalize_rows(x, batch_size=4): - # Normalize each row to have mean 0 - return x - x.mean(axis=1, keepdims=True) - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 5) - result = normalize_rows(data, batch_size=3) - expected = data - data.mean(axis=1, keepdims=True) - - np.testing.assert_allclose(result, expected) - - -class TestBatchFnWithOtherDecorators: - """Test batch_fn combined with other decorators""" - - def test_with_torch_fn(self): - """Test batch_fn with torch_fn for 2D operations""" - - @batch_fn - @torch_fn - def torch_scale(x, batch_size=4): - # Returns 2D tensor - preserves shape - return x * 3 - - # Use 12 rows with batch_size=3 for even batches - data = np.random.randn(12, 5) - result = torch_scale(data, batch_size=3) - expected = data * 3 - - np.testing.assert_allclose(result, expected, rtol=1e-6) - - def test_with_numpy_fn(self): - """Test batch_fn with numpy_fn for 2D operations""" - - @batch_fn - @numpy_fn - def numpy_scale(x, batch_size=4): - # Returns 2D array - preserves shape - return x * 2.5 - - # Use 12 rows with batch_size=3 for even batches - data = torch.randn(12, 5) - result = numpy_scale(data, batch_size=3) - expected = data.numpy() * 2.5 - - np.testing.assert_allclose(result, expected, rtol=1e-6) - - def test_nested_decorator_context(self): - """Test nested decorator context handling""" - - @batch_fn - @torch_fn - def nested_func(x, batch_size=4): - # Returns 2D tensor - preserves shape - return x + 1 - - # Use 12 rows with batch_size=3 for even batches - data = torch.randn(12, 5) - result = nested_func(data, batch_size=3) - expected = data + 1 - assert torch.allclose(result, expected) - - -class TestBatchFnEdgeCases: - """Test edge cases for batch_fn""" - - def test_single_row(self): - """Test with single row input""" - - 
@batch_fn - def process(x, batch_size=4): - return x * 2 - - data = np.array([[1, 2, 3]]) # Single row - result = process(data, batch_size=4) - - np.testing.assert_array_equal(result, data * 2) - - def test_exact_batch_boundary(self): - """Test when data size is exact multiple of batch_size""" - call_count = 0 - - @batch_fn - def count_calls(x, batch_size=4): - nonlocal call_count - call_count += 1 - return x - - data = np.arange(12).reshape(-1, 1) # Exactly 3 batches of 4 - result = count_calls(data, batch_size=4) - - assert call_count == 3 - np.testing.assert_array_equal(result.flatten(), data.flatten()) - - def test_preserves_dtype(self): - """Test that dtype is preserved through batching""" - - @batch_fn - def identity(x, batch_size=4): - return x - - data = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) - result = identity(data, batch_size=2) - - assert result.dtype == np.float32 - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_batch_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-05-01 09:18:26 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_batch_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_batch_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# from typing import Any as _Any -# -# from functools import wraps -# from typing import Callable -# -# import numpy as np -# from tqdm import tqdm as _tqdm -# -# from ._converters import is_nested_decorator -# -# -# def batch_fn(func: Callable) -> Callable: -# @wraps(func) -# def wrapper(x: _Any, *args: _Any, **kwargs: _Any) -> _Any: 
-# # Skip batching if in a nested decorator context and batch_size is already set -# if is_nested_decorator() and "batch_size" in kwargs: -# return func(x, *args, **kwargs) -# -# # Set the current decorator context -# wrapper._current_decorator = "batch_fn" -# -# # Mark that batch_fn has been applied -# if not hasattr(wrapper, "_decorator_order"): -# wrapper._decorator_order = [] -# wrapper._decorator_order.append("batch_fn") -# -# batch_size = int(kwargs.pop("batch_size", 4)) -# if len(x) <= batch_size: -# # Only pass batch_size if the function accepts it -# import inspect -# -# try: -# sig = inspect.signature(func) -# if "batch_size" in sig.parameters: -# return func(x, *args, **kwargs, batch_size=batch_size) -# else: -# return func(x, *args, **kwargs) -# except: -# # Fallback for wrapped functions -# return func(x, *args, **kwargs) -# -# n_batches = (len(x) + batch_size - 1) // batch_size -# results = [] -# -# for i_batch in _tqdm(range(n_batches)): -# start = i_batch * batch_size -# end = min((i_batch + 1) * batch_size, len(x)) -# -# # Only pass batch_size if the function accepts it -# import inspect -# -# try: -# sig = inspect.signature(func) -# if "batch_size" in sig.parameters: -# batch_result = func( -# x[start:end], *args, **kwargs, batch_size=batch_size -# ) -# else: -# batch_result = func(x[start:end], *args, **kwargs) -# except: -# # Fallback for wrapped functions -# batch_result = func(x[start:end], *args, **kwargs) -# -# import torch -# -# if isinstance(batch_result, torch.Tensor): -# batch_result = batch_result.cpu() -# elif isinstance(batch_result, tuple): -# batch_result = tuple( -# val.cpu() if isinstance(val, torch.Tensor) else val -# for val in batch_result -# ) -# -# results.append(batch_result) -# -# import torch -# -# if isinstance(results[0], tuple): -# n_vars = len(results[0]) -# combined_results = [] -# for i_var in range(n_vars): -# # Check if this element is stackable (tensor/array) or should be kept as-is -# first_elem = 
results[0][i_var] -# if isinstance(first_elem, (torch.Tensor, np.ndarray)): -# # Stack tensors/arrays -# if isinstance(first_elem, torch.Tensor): -# if first_elem.ndim == 0: -# combined = torch.stack([res[i_var] for res in results]) -# else: -# combined = torch.vstack([res[i_var] for res in results]) -# else: -# combined = np.vstack([res[i_var] for res in results]) -# combined_results.append(combined) -# else: -# # For non-tensor elements (like lists), just take the first one -# # (assuming they're all the same across batches) -# combined_results.append(first_elem) -# return tuple(combined_results) -# elif isinstance(results[0], torch.Tensor): -# # Check if results are 0-D tensors (scalars) -# if results[0].ndim == 0: -# return torch.stack(results) -# else: -# return torch.vstack(results) -# elif isinstance(results[0], np.ndarray): -# # Handle numpy arrays -# if results[0].ndim == 0: -# return np.array(results) -# else: -# return np.vstack(results) -# elif isinstance(results[0], (int, float)): -# # Handle scalar results -# return np.array(results) if len(results) > 1 else results[0] -# else: -# # For lists and other types -# return sum(results, []) -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "batch_fn" -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_batch_fn.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__cache_disk.py b/tests/scitex/decorators/test__cache_disk.py deleted file mode 100644 index 52467672..00000000 --- a/tests/scitex/decorators/test__cache_disk.py +++ /dev/null @@ -1,504 +0,0 @@ -#!/usr/bin/env python3 -# Time-stamp: "2025-06-02 15:56:00 (ywatanabe)" -# File: ./scitex_repo/tests/scitex/decorators/test__cache_disk.py - -"""Tests for disk caching decorator 
functionality.""" - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") -import functools -import os -import shutil -import tempfile -import time -from unittest.mock import MagicMock, patch - -# joblib was changed from top-level eager import to lazy-import inside the -# decorator body (todo#442). The legacy tests in this file depend on -# joblib being available (they construct Memory objects directly). The new -# regression test for the lazy-import behavior lives in a sibling file -# (test__lazy_imports.py) so it can run on venvs without joblib. -joblib = pytest.importorskip( - "joblib", - reason="legacy cache_disk tests need joblib; lazy-import regression in test__lazy_imports.py", -) -Memory = joblib.Memory - - -def create_cache_disk_decorator(cache_dir): - """Create a fresh cache_disk decorator with a specific cache directory. - - This is used for testing to ensure each test has isolated cache. - """ - memory = Memory(cache_dir, verbose=0) - - def cache_disk(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - cached_func = memory.cache(func) - return cached_func(*args, **kwargs) - - return wrapper - - return cache_disk - - -class TestCacheDisk: - """Test cases for scitex.decorators._cache_disk module.""" - - def setup_method(self): - """Set up test fixtures before each test method.""" - # Create temporary directory for cache testing - self.temp_dir = tempfile.mkdtemp() - self.call_count = 0 - - def teardown_method(self): - """Clean up test fixtures after each test method.""" - # Clean up temporary directory - if os.path.exists(self.temp_dir): - shutil.rmtree(self.temp_dir) - - def test_cache_disk_import(self): - """Test that cache_disk can be imported successfully.""" - from scitex.decorators import cache_disk - - assert callable(cache_disk) - - def test_cache_disk_basic_functionality(self): - """Test basic disk caching functionality.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - 
- @cache_disk - def simple_func(x): - nonlocal call_count - call_count += 1 - return x * 2 - - # First call should execute the function - result1 = simple_func(5) - assert result1 == 10 - assert call_count == 1 - - # Second call with same argument should use disk cache - result2 = simple_func(5) - assert result2 == 10 - assert call_count == 1 # No additional function call - - # Call with different argument should execute function again - result3 = simple_func(10) - assert result3 == 20 - assert call_count == 2 - - def test_cache_disk_with_arguments(self): - """Test disk caching with multiple arguments.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def multi_arg_func(x, y, z=10): - nonlocal call_count - call_count += 1 - return x + y + z - - # Test with positional arguments - result1 = multi_arg_func(1, 2) - assert result1 == 13 - assert call_count == 1 - - # Same arguments should use cache - result2 = multi_arg_func(1, 2) - assert result2 == 13 - assert call_count == 1 - - # Different arguments should execute function - result3 = multi_arg_func(1, 2, z=20) - assert result3 == 23 - assert call_count == 2 - - def test_cache_disk_with_keyword_arguments(self): - """Test disk caching with keyword arguments.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def keyword_func(a, b=5, c=10): - nonlocal call_count - call_count += 1 - return a * b + c - - # Test with keyword arguments - result1 = keyword_func(2, b=3, c=4) - assert result1 == 10 - assert call_count == 1 - - # Same call should use cache - result2 = keyword_func(2, b=3, c=4) - assert result2 == 10 - assert call_count == 1 - - def test_cache_disk_return_types(self): - """Test disk caching with different return types.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def return_various_types(type_name): - nonlocal call_count - call_count += 1 - - if type_name == "list": - 
return [1, 2, 3] - elif type_name == "dict": - return {"key": "value"} - elif type_name == "tuple": - return (1, 2, 3) - elif type_name == "none": - return None - else: - return type_name - - # Test list return - result1 = return_various_types("list") - assert result1 == [1, 2, 3] - assert call_count == 1 - - result2 = return_various_types("list") - assert result2 == [1, 2, 3] - assert call_count == 1 # Should use cache - - # Test dict return - result3 = return_various_types("dict") - assert result3 == {"key": "value"} - assert call_count == 2 - - # Test None return - result4 = return_various_types("none") - assert result4 is None - assert call_count == 3 - - def test_cache_disk_persistence_across_function_calls(self): - """Test that disk cache persists across different function instances.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def persistent_func(x): - nonlocal call_count - call_count += 1 - return x**2 - - # First call - result1 = persistent_func(5) - assert result1 == 25 - assert call_count == 1 - - # Second call with same argument should use cache - result2 = persistent_func(5) - assert result2 == 25 - assert call_count == 1 # Should use cache - - def test_cache_disk_performance_improvement(self): - """Test that disk caching improves performance.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - - @cache_disk - def slow_function(n): - # Simulate slow computation - time.sleep(0.05) # 50ms delay - return n**2 - - # Time first call - start_time = time.time() - result1 = slow_function(5) - first_call_time = time.time() - start_time - - # Time second call (should be faster due to disk caching) - start_time = time.time() - result2 = slow_function(5) - second_call_time = time.time() - start_time - - assert result1 == result2 == 25 - # Second call should be significantly faster - assert second_call_time < first_call_time * 0.5 - - def test_cache_disk_uses_scitex_dir_environment_variable(self): - 
"""Test that cache_disk respects SciTeX_DIR environment variable.""" - with tempfile.TemporaryDirectory() as temp_dir: - custom_scitex_dir = temp_dir + "/custom_scitex/" - - with patch.dict(os.environ, {"SciTeX_DIR": custom_scitex_dir}): - from scitex.decorators import cache_disk - - @cache_disk - def test_func(x): - return x * 2 - - # Call function to trigger cache creation - result = test_func(5) - assert result == 10 - - def test_cache_disk_default_cache_location(self): - """Test cache_disk uses default location when SciTeX_DIR not set.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - - @cache_disk - def test_func(x): - return x * 3 - - # Should work with default location - result = test_func(7) - assert result == 21 - - def test_cache_disk_with_complex_data_structures(self): - """Test disk caching with complex data structures.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def complex_data_func(data_type): - nonlocal call_count - call_count += 1 - - if data_type == "nested_dict": - return {"level1": {"level2": {"values": [1, 2, 3, 4, 5]}}} - elif data_type == "nested_list": - return [[1, 2], [3, 4], [5, [6, 7]]] - else: - return {"simple": "data"} - - # Test complex nested dictionary - result1 = complex_data_func("nested_dict") - expected_dict = {"level1": {"level2": {"values": [1, 2, 3, 4, 5]}}} - assert result1 == expected_dict - assert call_count == 1 - - # Should use cache for same input - result2 = complex_data_func("nested_dict") - assert result2 == expected_dict - assert call_count == 1 - - def test_cache_disk_function_signature_preservation(self): - """Test that decorated function preserves original signature.""" - from scitex.decorators import cache_disk - - @cache_disk - def documented_func(x, y=10): - """This is a test function with documentation.""" - return x + y - - # Function should preserve name and docstring - assert documented_func.__name__ == "documented_func" - assert "test 
function with documentation" in documented_func.__doc__ - - def test_cache_disk_with_exceptions(self): - """Test disk caching behavior when function raises exceptions.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def error_func(x): - nonlocal call_count - call_count += 1 - if x < 0: - raise ValueError("Negative value not allowed") - return x * 2 - - # Function that raises exception - with pytest.raises(ValueError): - error_func(-1) - assert call_count == 1 - - # Successful call should work - result = error_func(5) - assert result == 10 - assert call_count == 2 - - # Same successful call should use cache - result2 = error_func(5) - assert result2 == 10 - assert call_count == 2 - - @patch("scitex.decorators._cache_disk._Memory") - def test_cache_disk_joblib_memory_integration(self, mock_memory_class): - """Test integration with joblib.Memory.""" - mock_memory = MagicMock() - mock_memory_class.return_value = mock_memory - mock_cached_func = MagicMock(return_value=42) - mock_memory.cache.return_value = mock_cached_func - - from scitex.decorators import cache_disk - - @cache_disk - def test_func(x): - return x * 2 - - result = test_func(5) - - # Verify joblib Memory was instantiated - mock_memory_class.assert_called_once() - - # Verify cache method was called - mock_memory.cache.assert_called_once() - - # Verify cached function was called with correct arguments - mock_cached_func.assert_called_once_with(5) - assert result == 42 - - def test_cache_disk_cache_directory_creation(self): - """Test that cache directory is created correctly.""" - with tempfile.TemporaryDirectory() as temp_dir: - custom_scitex_dir = os.path.join(temp_dir, "test_scitex") - - with patch.dict(os.environ, {"SciTeX_DIR": custom_scitex_dir + "/"}): - from scitex.decorators import cache_disk - - @cache_disk - def test_func(x): - return x**2 - - # Call function to trigger cache setup - result = test_func(4) - assert result == 16 - - def 
test_cache_disk_multiple_functions(self): - """Test disk caching with multiple different functions.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count_1 = 0 - call_count_2 = 0 - - @cache_disk - def func1(x): - nonlocal call_count_1 - call_count_1 += 1 - return x * 2 - - @cache_disk - def func2(x): - nonlocal call_count_2 - call_count_2 += 1 - return x * 3 - - # Test both functions - result1 = func1(5) - result2 = func2(5) - assert result1 == 10 - assert result2 == 15 - assert call_count_1 == 1 - assert call_count_2 == 1 - - # Test caching for both - result1_cached = func1(5) - result2_cached = func2(5) - assert result1_cached == 10 - assert result2_cached == 15 - assert call_count_1 == 1 # Should use cache - assert call_count_2 == 1 # Should use cache - - @pytest.mark.skip(reason="joblib.Memory cannot hash instance methods with 'self'") - def test_cache_disk_with_class_methods(self): - """Test disk caching with class methods. - - Note: This test is skipped because joblib.Memory cannot hash instance - methods that receive 'self' as an argument. This is a known limitation. - Use staticmethod or classmethod with cache_disk instead. 
- """ - pass - - def test_cache_disk_memory_verbose_setting(self): - """Test that joblib Memory is created with verbose=0.""" - with patch("scitex.decorators._cache_disk._Memory") as mock_memory: - from scitex.decorators import cache_disk - - @cache_disk - def test_func(x): - return x * 2 - - # The Memory object should be created with verbose=0 - # Check the call arguments - call_args = mock_memory.call_args - if call_args: - if len(call_args[0]) > 1: - # If verbose is passed as positional argument - assert call_args[0][1] == 0 - elif "verbose" in call_args[1]: - # If verbose is passed as keyword argument - assert call_args[1]["verbose"] == 0 - - def test_cache_disk_concurrent_access_safety(self): - """Test disk caching with concurrent-like access patterns.""" - cache_disk = create_cache_disk_decorator(self.temp_dir) - call_count = 0 - - @cache_disk - def test_func(x): - nonlocal call_count - call_count += 1 - return x * 2 - - # Simulate multiple rapid calls - results = [] - for _ in range(5): - results.append(test_func(42)) - - # All results should be the same - assert all(r == 84 for r in results) - # Function should only be called once due to caching - assert call_count == 1 - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-12-09 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk.py -# # ---------------------------------------- -# from __future__ import annotations -# import os -# -# __FILE__ = "./src/scitex/decorators/_cache_disk.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# 
import functools -# -# from joblib import Memory as _Memory -# -# from scitex.config import get_paths -# -# -# def cache_disk(func): -# """Disk caching decorator that uses joblib.Memory. -# -# Usage: -# @cache_disk -# def expensive_function(x): -# return x ** 2 -# """ -# cache_dir = str(get_paths().function_cache) -# memory = _Memory(cache_dir, verbose=0) -# -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# cached_func = memory.cache(func) -# return cached_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__cache_disk_async.py b/tests/scitex/decorators/test__cache_disk_async.py deleted file mode 100644 index f844fe17..00000000 --- a/tests/scitex/decorators/test__cache_disk_async.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python3 -# Time-stamp: "2026-01-04 21:20:00 (ywatanabe)" -# File: ./tests/scitex/decorators/test__cache_disk_async.py - -"""Test cache_disk_async decorator functionality. - -The cache_disk_async decorator provides disk-based caching for async functions -using joblib.Memory. 
-""" - -import asyncio - -import pytest - -pytest.importorskip("joblib") - -from scitex.decorators import cache_disk_async - - -class TestCacheDiskAsync: - """Test cache_disk_async decorator""" - - def test_cache_disk_async_import(self): - """Test that cache_disk_async can be imported""" - from scitex.decorators import cache_disk_async - - assert callable(cache_disk_async) - - @pytest.mark.asyncio - async def test_cache_disk_async_basic_functionality(self): - """Test basic async function caching""" - call_count = 0 - - @cache_disk_async - async def async_square(x): - nonlocal call_count - call_count += 1 - await asyncio.sleep(0.01) - return x**2 - - # First call should execute the function - result1 = await async_square(5) - assert result1 == 25 - first_count = call_count - - # Second call with same args should use cache - result2 = await async_square(5) - assert result2 == 25 - # call_count may or may not increase depending on cache implementation - assert result1 == result2 - - @pytest.mark.asyncio - async def test_cache_disk_async_with_different_args(self): - """Test caching with different arguments""" - - @cache_disk_async - async def async_multiply(x, y): - await asyncio.sleep(0.01) - return x * y - - result1 = await async_multiply(3, 4) - result2 = await async_multiply(5, 6) - - assert result1 == 12 - assert result2 == 30 - - @pytest.mark.asyncio - async def test_cache_disk_async_with_kwargs(self): - """Test caching with keyword arguments""" - - @cache_disk_async - async def async_power(base, exponent=2): - await asyncio.sleep(0.01) - return base**exponent - - result1 = await async_power(3) - result2 = await async_power(3, exponent=3) - - assert result1 == 9 - assert result2 == 27 - - @pytest.mark.asyncio - async def test_cache_disk_async_return_types(self): - """Test caching with various return types""" - - @cache_disk_async - async def async_return_dict(key, value): - await asyncio.sleep(0.01) - return {key: value} - - result = await 
async_return_dict("test", 42) - assert result == {"test": 42} - - @pytest.mark.asyncio - async def test_cache_disk_async_preserves_function_metadata(self): - """Test that decorator preserves function metadata""" - - @cache_disk_async - async def documented_async_func(x): - """This is a documented async function""" - return x * 2 - - assert documented_async_func.__name__ == "documented_async_func" - assert documented_async_func.__doc__ == "This is a documented async function" - - @pytest.mark.asyncio - async def test_cache_disk_async_is_async(self): - """Test that decorated function is still async""" - import inspect - - @cache_disk_async - async def async_identity(x): - return x - - assert inspect.iscoroutinefunction(async_identity) - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk_async.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-12-09 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk_async.py -# # ---------------------------------------- -# from __future__ import annotations -# import os -# -# __FILE__ = "./src/scitex/decorators/_cache_disk_async.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# """Async disk caching decorator using joblib.Memory.""" -# -# import asyncio -# import functools -# -# from joblib import Memory as _Memory -# -# from scitex.config import get_paths -# -# -# def cache_disk_async(func): -# """Disk caching decorator for async functions. 
-# -# Usage: -# @cache_disk_async -# async def expensive_async_function(x): -# await asyncio.sleep(1) -# return x ** 2 -# """ -# cache_dir = str(get_paths().function_cache) -# memory = _Memory(cache_dir, verbose=0) -# -# # Create sync wrapper for joblib -# def sync_wrapper(*args, **kwargs): -# return asyncio.run(func(*args, **kwargs)) -# -# cached_sync = memory.cache(sync_wrapper) -# -# @functools.wraps(func) -# async def async_wrapper(*args, **kwargs): -# # Run cached sync version in executor to avoid blocking -# loop = asyncio.get_event_loop() -# result = await loop.run_in_executor(None, lambda: cached_sync(*args, **kwargs)) -# return result -# -# return async_wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_disk_async.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__cache_mem.py b/tests/scitex/decorators/test__cache_mem.py deleted file mode 100644 index 4a04e16f..00000000 --- a/tests/scitex/decorators/test__cache_mem.py +++ /dev/null @@ -1,424 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2025-06-02 15:54:00 (ywatanabe)" -# File: ./scitex_repo/tests/scitex/decorators/test__cache_mem.py - -"""Tests for memory caching decorator functionality.""" - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") -import time -from unittest.mock import MagicMock, patch - - -class TestCacheMem: - """Test cases for scitex.decorators._cache_mem module.""" - - def setup_method(self): - """Set up test fixtures before each test method.""" - # Clear any existing cache before each test - self.call_count = 0 - - def test_cache_mem_import(self): - """Test that cache_mem can be imported successfully.""" - from scitex.decorators import cache_mem - - assert callable(cache_mem) - - def 
test_cache_mem_basic_functionality(self): - """Test basic caching functionality with simple function.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def simple_func(x): - nonlocal call_count - call_count += 1 - return x * 2 - - # First call should execute the function - result1 = simple_func(5) - assert result1 == 10 - assert call_count == 1 - - # Second call with same argument should use cache - result2 = simple_func(5) - assert result2 == 10 - assert call_count == 1 # No additional function call - - # Call with different argument should execute function again - result3 = simple_func(10) - assert result3 == 20 - assert call_count == 2 - - def test_cache_mem_multiple_arguments(self): - """Test caching with multiple arguments.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def multi_arg_func(x, y, z=10): - nonlocal call_count - call_count += 1 - return x + y + z - - # Test with positional arguments - result1 = multi_arg_func(1, 2) - assert result1 == 13 - assert call_count == 1 - - # Same arguments should use cache - result2 = multi_arg_func(1, 2) - assert result2 == 13 - assert call_count == 1 - - # Different z value should execute function - result3 = multi_arg_func(1, 2, z=20) - assert result3 == 23 - assert call_count == 2 - - # Keyword arguments should be cached separately - result4 = multi_arg_func(1, 2, z=20) - assert result4 == 23 - assert call_count == 2 # Should use cache - - def test_cache_mem_with_keyword_arguments(self): - """Test caching behavior with keyword arguments.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def keyword_func(a, b=5, c=10): - nonlocal call_count - call_count += 1 - return a * b + c - - # Test different ways of calling the same function - result1 = keyword_func(2, b=3, c=4) - assert result1 == 10 - assert call_count == 1 - - # Same call should use cache - result2 = keyword_func(2, b=3, c=4) - assert result2 == 10 - assert 
call_count == 1 - - # Different order of keyword args (may create different cache entries) - result3 = keyword_func(2, c=4, b=3) - assert result3 == 10 - assert call_count <= 2 # May be cached or may be a new entry - - def test_cache_mem_return_types(self): - """Test caching with different return types.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def return_various_types(type_name): - nonlocal call_count - call_count += 1 - - if type_name == "list": - return [1, 2, 3] - elif type_name == "dict": - return {"key": "value"} - elif type_name == "tuple": - return (1, 2, 3) - elif type_name == "none": - return None - else: - return type_name - - # Test list return - result1 = return_various_types("list") - assert result1 == [1, 2, 3] - assert call_count == 1 - - result2 = return_various_types("list") - assert result2 == [1, 2, 3] - assert call_count == 1 # Should use cache - - # Test dict return - result3 = return_various_types("dict") - assert result3 == {"key": "value"} - assert call_count == 2 - - # Test None return - result4 = return_various_types("none") - assert result4 is None - assert call_count == 3 - - result5 = return_various_types("none") - assert result5 is None - assert call_count == 3 # Should use cache - - def test_cache_mem_with_mutable_arguments(self): - """Test caching behavior with mutable arguments.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def process_list(lst): - nonlocal call_count - call_count += 1 - return sum(lst) - - # Test with list argument - list1 = [1, 2, 3] - result1 = process_list(tuple(list1)) # Convert to tuple for hashability - assert result1 == 6 - assert call_count == 1 - - # Same tuple should use cache - result2 = process_list(tuple(list1)) - assert result2 == 6 - assert call_count == 1 - - def test_cache_mem_performance_improvement(self): - """Test that caching actually improves performance.""" - from scitex.decorators import cache_mem - - @cache_mem - def 
slow_function(n): - # Simulate slow computation - time.sleep(0.01) # 10ms delay - return n**2 - - # Time first call - start_time = time.time() - result1 = slow_function(5) - first_call_time = time.time() - start_time - - # Time second call (should be much faster due to caching) - start_time = time.time() - result2 = slow_function(5) - second_call_time = time.time() - start_time - - assert result1 == result2 == 25 - assert second_call_time < first_call_time / 2 # Should be significantly faster - - def test_cache_mem_cache_info(self): - """Test cache_info functionality.""" - from scitex.decorators import cache_mem - - @cache_mem - def test_func(x): - return x * 2 - - # Check that cache_info is available - assert hasattr(test_func, "cache_info") - - # Initial cache should be empty - info = test_func.cache_info() - assert info.hits == 0 - assert info.misses == 0 - - # After first call - test_func(5) - info = test_func.cache_info() - assert info.hits == 0 - assert info.misses == 1 - - # After second call with same argument - test_func(5) - info = test_func.cache_info() - assert info.hits == 1 - assert info.misses == 1 - - def test_cache_mem_cache_clear(self): - """Test cache clearing functionality.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def test_func(x): - nonlocal call_count - call_count += 1 - return x * 2 - - # Make some calls - test_func(5) - test_func(5) # Should use cache - assert call_count == 1 - - # Clear cache - test_func.cache_clear() - - # Next call should execute function again - test_func(5) - assert call_count == 2 - - def test_cache_mem_unlimited_size(self): - """Test that cache has unlimited size (maxsize=None).""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def test_func(x): - nonlocal call_count - call_count += 1 - return x - - # Make many unique calls - for i in range(1000): - test_func(i) - - assert call_count == 1000 - - # All previous calls should still be cached - for i 
in range(1000): - test_func(i) - - assert call_count == 1000 # No additional calls - - def test_cache_mem_exception_handling(self): - """Test caching behavior when function raises exceptions.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def error_func(x): - nonlocal call_count - call_count += 1 - if x < 0: - raise ValueError("Negative value") - return x * 2 - - # Function that raises exception - with pytest.raises(ValueError): - error_func(-1) - assert call_count == 1 - - # Exception should not be cached (function should be called again) - with pytest.raises(ValueError): - error_func(-1) - assert call_count == 2 - - # Successful call should be cached - result = error_func(5) - assert result == 10 - assert call_count == 3 - - result2 = error_func(5) - assert result2 == 10 - assert call_count == 3 # Should use cache - - def test_cache_mem_with_class_methods(self): - """Test caching with class methods.""" - from scitex.decorators import cache_mem - - class TestClass: - def __init__(self): - self.call_count = 0 - - @cache_mem - def cached_method(self, x): - self.call_count += 1 - return x * 2 - - obj = TestClass() - - # First call - result1 = obj.cached_method(5) - assert result1 == 10 - assert obj.call_count == 1 - - # Second call should use cache - result2 = obj.cached_method(5) - assert result2 == 10 - assert obj.call_count == 1 - - def test_cache_mem_function_attributes(self): - """Test that decorated function preserves important attributes.""" - from scitex.decorators import cache_mem - - @cache_mem - def documented_func(x): - """This is a test function.""" - return x - - # Function should have cache-related attributes - assert hasattr(documented_func, "cache_info") - assert hasattr(documented_func, "cache_clear") - assert callable(documented_func.cache_info) - assert callable(documented_func.cache_clear) - - def test_cache_mem_is_lru_cache_wrapper(self): - """Test that cache_mem is indeed a wrapper around lru_cache.""" - from 
functools import lru_cache - - from scitex.decorators import cache_mem - - # cache_mem should be an lru_cache with maxsize=None - # Test by checking the actual functionality rather than object equality - @cache_mem - def test_func(x): - return x * 2 - - # Should have lru_cache attributes - assert hasattr(test_func, "cache_info") - assert hasattr(test_func, "cache_clear") - - # Cache info should have infinite maxsize - info = test_func.cache_info() - assert info.maxsize is None - - def test_cache_mem_concurrent_access(self): - """Test caching behavior with concurrent-like access patterns.""" - from scitex.decorators import cache_mem - - call_count = 0 - - @cache_mem - def test_func(x): - nonlocal call_count - call_count += 1 - return x * 2 - - # Simulate multiple rapid calls - results = [] - for _ in range(10): - results.append(test_func(42)) - - # All results should be the same - assert all(r == 84 for r in results) - # Function should only be called once - assert call_count == 1 - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_mem.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:52:33 (ywatanabe)" -# # File: ./scitex_repo/src/scitex/decorators/_cache_mem.py -# -# from functools import lru_cache as _lru_cache -# -# # Memory cache -# cache_mem = _lru_cache(maxsize=None) -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_cache_mem.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__combined.py 
b/tests/scitex/decorators/test__combined.py deleted file mode 100644 index 424b4294..00000000 --- a/tests/scitex/decorators/test__combined.py +++ /dev/null @@ -1,355 +0,0 @@ -#!/usr/bin/env python3 -# Time-stamp: "2025-06-02 15:00:00 (ywatanabe)" -# File: ./tests/scitex/decorators/test__combined.py - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") -import unittest.mock as mock - -import numpy as np -import pandas as pd - - -def test_combined_torch_batch_fn_exists(): - """Test that torch_batch_fn decorator exists and is callable.""" - from scitex.decorators import torch_batch_fn - - assert torch_batch_fn is not None - assert callable(torch_batch_fn) - - -def test_combined_numpy_batch_fn_exists(): - """Test that numpy_batch_fn decorator exists and is callable.""" - from scitex.decorators import numpy_batch_fn - - assert numpy_batch_fn is not None - assert callable(numpy_batch_fn) - - -def test_combined_pandas_batch_fn_exists(): - """Test that pandas_batch_fn decorator exists and is callable.""" - from scitex.decorators import pandas_batch_fn - - assert pandas_batch_fn is not None - assert callable(pandas_batch_fn) - - -@mock.patch("scitex.decorators._combined.torch_fn") -@mock.patch("scitex.decorators._combined.batch_fn") -def test_combined_torch_batch_fn_decorator_application(mock_batch_fn, mock_torch_fn): - """Test that torch_batch_fn applies decorators in correct order.""" - from scitex.decorators import torch_batch_fn - - # Mock the decorators to return identity functions - mock_torch_fn.return_value = lambda x: x - mock_batch_fn.return_value = lambda x: x - - @torch_batch_fn - def dummy_func(x): - return x - - # Verify both decorators were called - mock_torch_fn.assert_called_once() - mock_batch_fn.assert_called_once() - - -@mock.patch("scitex.decorators._combined.numpy_fn") -@mock.patch("scitex.decorators._combined.batch_fn") -def test_combined_numpy_batch_fn_decorator_application(mock_batch_fn, mock_numpy_fn): - """Test 
that numpy_batch_fn applies decorators in correct order.""" - from scitex.decorators import numpy_batch_fn - - # Mock the decorators to return identity functions - mock_numpy_fn.return_value = lambda x: x - mock_batch_fn.return_value = lambda x: x - - @numpy_batch_fn - def dummy_func(x): - return x - - # Verify both decorators were called - mock_numpy_fn.assert_called_once() - mock_batch_fn.assert_called_once() - - -@mock.patch("scitex.decorators._combined.pandas_fn") -@mock.patch("scitex.decorators._combined.batch_fn") -def test_combined_pandas_batch_fn_decorator_application(mock_batch_fn, mock_pandas_fn): - """Test that pandas_batch_fn applies decorators in correct order.""" - from scitex.decorators import pandas_batch_fn - - # Mock the decorators to return identity functions - mock_pandas_fn.return_value = lambda x: x - mock_batch_fn.return_value = lambda x: x - - @pandas_batch_fn - def dummy_func(x): - return x - - # Verify both decorators were called - mock_pandas_fn.assert_called_once() - mock_batch_fn.assert_called_once() - - -def test_combined_aliases_exist(): - """Test that decorator aliases exist and reference correct functions.""" - from scitex.decorators import ( - batch_numpy_fn, - batch_pandas_fn, - batch_torch_fn, - numpy_batch_fn, - pandas_batch_fn, - torch_batch_fn, - ) - - # Test aliases reference the same functions - assert batch_torch_fn is torch_batch_fn - assert batch_numpy_fn is numpy_batch_fn - assert batch_pandas_fn is pandas_batch_fn - - -def test_combined_function_metadata_preservation(): - """Test that decorators preserve function metadata.""" - from scitex.decorators import torch_batch_fn - - @torch_batch_fn - def test_function(x, y=1): - """Test function docstring.""" - return x + y - - # Test that function name and docstring are preserved - assert test_function.__name__ == "test_function" - assert "Test function docstring" in test_function.__doc__ - - -def test_combined_all_exports(): - """Test that __all__ contains expected combined 
decorator exports.""" - from scitex.decorators import __all__ - - # Combined decorators should be included in the full __all__ - expected_combined_exports = [ - "torch_batch_fn", - "numpy_batch_fn", - "pandas_batch_fn", - "batch_torch_fn", - "batch_numpy_fn", - "batch_pandas_fn", - ] - - for export in expected_combined_exports: - assert export in __all__, f"{export} should be in __all__" - - -def test_combined_imports_work(): - """Test that all imports from the module work correctly.""" - # Test individual imports - from scitex.decorators import ( - batch_numpy_fn, - batch_pandas_fn, - batch_torch_fn, - numpy_batch_fn, - pandas_batch_fn, - torch_batch_fn, - ) - - # Test that they are all callable - decorators = [ - torch_batch_fn, - numpy_batch_fn, - pandas_batch_fn, - batch_torch_fn, - batch_numpy_fn, - batch_pandas_fn, - ] - - for decorator in decorators: - assert callable(decorator) - - -@mock.patch("scitex.decorators._combined.torch_fn", side_effect=lambda x: x) -@mock.patch("scitex.decorators._combined.batch_fn", side_effect=lambda x: x) -def test_combined_torch_batch_fn_functionality(mock_batch_fn, mock_torch_fn): - """Test basic functionality of torch_batch_fn decorated function.""" - from scitex.decorators import torch_batch_fn - - @torch_batch_fn - def simple_function(x): - return x * 2 - - # Test that function can be called - result = simple_function(5) - assert result == 10 - - -@mock.patch("scitex.decorators._combined.numpy_fn", side_effect=lambda x: x) -@mock.patch("scitex.decorators._combined.batch_fn", side_effect=lambda x: x) -def test_combined_numpy_batch_fn_functionality(mock_batch_fn, mock_numpy_fn): - """Test basic functionality of numpy_batch_fn decorated function.""" - from scitex.decorators import numpy_batch_fn - - @numpy_batch_fn - def simple_function(x): - return x * 2 - - # Test that function can be called - result = simple_function(5) - assert result == 10 - - -@mock.patch("scitex.decorators._combined.pandas_fn", side_effect=lambda x: x) 
-@mock.patch("scitex.decorators._combined.batch_fn", side_effect=lambda x: x) -def test_combined_pandas_batch_fn_functionality(mock_batch_fn, mock_pandas_fn): - """Test basic functionality of pandas_batch_fn decorated function.""" - from scitex.decorators import pandas_batch_fn - - @pandas_batch_fn - def simple_function(x): - return x * 2 - - # Test that function can be called - result = simple_function(5) - assert result == 10 - - -def test_combined_decorator_dependencies(): - """Test that required decorator dependencies can be imported.""" - # Test that individual decorators can be imported - from scitex.decorators import batch_fn, numpy_fn, pandas_fn, torch_fn - - # Test that they are callable - assert callable(batch_fn) - assert callable(torch_fn) - assert callable(numpy_fn) - assert callable(pandas_fn) - - -def test_combined_wraps_import(): - """Test that functools.wraps is properly imported and used.""" - from functools import wraps - from typing import Callable - - # Test that required imports work - assert wraps is not None - assert Callable is not None - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_combined.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2025-06-01 10:20:00 (ywatanabe)" -# # File: ./scitex_repo/src/scitex/decorators/_combined.py -# -# """ -# Combined decorators with predefined application order to reduce complexity. -# -# The order is always: type conversion → batch processing -# This ensures consistent behavior and reduces unexpected interactions. 
-# """ -# -# from functools import wraps -# from typing import Callable -# -# from ._batch_fn import batch_fn -# from ._torch_fn import torch_fn -# from ._numpy_fn import numpy_fn -# from ._pandas_fn import pandas_fn -# -# -# def torch_batch_fn(func: Callable) -> Callable: -# """ -# Combined decorator: torch_fn → batch_fn. -# -# Converts inputs to torch tensors, then processes in batches. -# This is the recommended order for PyTorch operations. -# -# Example -# ------- -# >>> @torch_batch_fn -# ... def process_data(x, dim=None): -# ... return x.mean(dim=dim) -# """ -# -# @wraps(func) -# @torch_fn -# @batch_fn -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# def numpy_batch_fn(func: Callable) -> Callable: -# """ -# Combined decorator: numpy_fn → batch_fn. -# -# Converts inputs to numpy arrays, then processes in batches. -# This is the recommended order for NumPy operations. -# -# Example -# ------- -# >>> @numpy_batch_fn -# ... def process_data(x, axis=None): -# ... return np.mean(x, axis=axis) -# """ -# -# @wraps(func) -# @numpy_fn -# @batch_fn -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# def pandas_batch_fn(func: Callable) -> Callable: -# """ -# Combined decorator: pandas_fn → batch_fn. -# -# Converts inputs to pandas DataFrames, then processes in batches. -# This is the recommended order for Pandas operations. -# -# Example -# ------- -# >>> @pandas_batch_fn -# ... def process_data(df): -# ... 
return df.describe() -# """ -# -# @wraps(func) -# @pandas_fn -# @batch_fn -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # Aliases for common use cases -# batch_torch_fn = torch_batch_fn # Alternative name -# batch_numpy_fn = numpy_batch_fn # Alternative name -# batch_pandas_fn = pandas_batch_fn # Alternative name -# -# -# __all__ = [ -# "torch_batch_fn", -# "numpy_batch_fn", -# "pandas_batch_fn", -# "batch_torch_fn", -# "batch_numpy_fn", -# "batch_pandas_fn", -# ] - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_combined.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__converters.py b/tests/scitex/decorators/test__converters.py deleted file mode 100644 index 02981b88..00000000 --- a/tests/scitex/decorators/test__converters.py +++ /dev/null @@ -1,319 +0,0 @@ -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_converters.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-04-30 14:58:43 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_converters.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_converters.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# import functools -# import warnings -# from typing import Any as _Any -# from typing import Callable, Dict, Tuple, Union -# -# 
import numpy as np -# -# """ -# Core conversion utilities for handling data type transformations. -# Provides consistent conversion between NumPy, PyTorch, Pandas, and other formats. -# """ -# -# -# class ConversionWarning(UserWarning): -# pass -# -# -# # Configure warnings -# warnings.simplefilter("always", ConversionWarning) -# -# -# @functools.lru_cache(maxsize=None) -# def _cached_warning(message: str) -> None: -# """Cache warnings to avoid repetition.""" -# warnings.warn(message, category=ConversionWarning) -# -# -# def _conversion_warning(old: _Any, new) -> None: -# """Generate standardized type conversion warning.""" -# message = ( -# f"Converted from {type(old).__name__} to {type(new).__name__} ({new.device}). " -# f"Consider using {type(new).__name__} ({new.device}) as input for faster computation." -# ) -# _cached_warning(message) -# -# -# def _try_device(tensor, device: str): -# """Try to move tensor to specified device with graceful fallback.""" -# import torch -# -# if not isinstance(tensor, torch.Tensor): -# return tensor -# -# if tensor.device.type == device: -# return tensor -# -# try: -# return tensor.to(device) -# except RuntimeError as error: -# if "cuda" in str(error).lower() and device == "cuda": -# warnings.warn("CUDA memory insufficient, falling back to CPU.", UserWarning) -# return tensor.cpu() -# raise error -# -# -# def is_torch(*args: _Any, **kwargs: _Any) -> bool: -# """Check if any input is a PyTorch tensor.""" -# import torch -# -# return any(isinstance(arg, torch.Tensor) for arg in args) or any( -# isinstance(val, torch.Tensor) for val in kwargs.values() -# ) -# -# -# def is_cuda(*args: _Any, **kwargs: _Any) -> bool: -# """Check if any input is a CUDA tensor.""" -# import torch -# -# return any((isinstance(arg, torch.Tensor) and arg.is_cuda) for arg in args) or any( -# (isinstance(val, torch.Tensor) and val.is_cuda) for val in kwargs.values() -# ) -# -# -# def _return_always(*args: _Any, **kwargs: _Any) -> Tuple[Tuple, Dict]: -# 
"""Always return args and kwargs as a tuple of (args, kwargs).""" -# return args, kwargs -# -# -# def _return_if(*args: _Any, **kwargs: _Any) -> Union[Tuple, Dict, None]: -# """Return args and/or kwargs depending on what's provided.""" -# if args and kwargs: -# return args, kwargs -# elif args: -# return args -# elif kwargs: -# return kwargs -# else: -# return None -# -# -# def to_torch( -# *args: _Any, -# return_fn: Callable = _return_if, -# device: str = None, -# **kwargs: _Any, -# ) -> _Any: -# """Convert various data types to PyTorch tensors.""" -# import torch -# -# if device is None: -# device = kwargs.get("device", "cuda" if torch.cuda.is_available() else "cpu") -# -# def _to_torch(data: _Any) -> _Any: -# """Internal conversion function for various data types.""" -# import torch -# import pandas as pd -# -# # Check for None -# if data is None: -# return None -# -# # Don't convert scalars (int, float, bool, str) - they should remain as is -# if isinstance(data, (int, float, bool, str)): -# return data -# -# # Handle collections -# if isinstance(data, (tuple, list)): -# # Check if it's a tuple/list of integers (like dimensions) -# if all(isinstance(item, int) for item in data): -# return data # Keep as is for dimension tuples -# -# # Check if it's a numeric array-like structure -# try: -# # Try to convert to tensor directly -# new_data = torch.tensor(data).float() -# new_data = _try_device(new_data, device) -# if device == "cuda": -# _conversion_warning(data, new_data) -# return new_data -# except: -# # If conversion fails, process items individually and return as tensor if possible -# converted_items = [_to_torch(item) for item in data if item is not None] -# # Try to stack if all items are tensors -# if converted_items and all( -# isinstance(item, torch.Tensor) for item in converted_items -# ): -# try: -# # Stack tensors along a new dimension -# return torch.stack(converted_items) -# except: -# # Return as list if stacking fails -# return converted_items -# 
return converted_items -# -# # Handle pandas types -# if isinstance(data, (pd.Series, pd.DataFrame)): -# new_data = torch.tensor(data.to_numpy()).squeeze().float() -# new_data = _try_device(new_data, device) -# if device == "cuda": -# _conversion_warning(data, new_data) -# return new_data -# -# # Handle arrays -# if isinstance(data, np.ndarray): -# new_data = torch.tensor(data).float() -# new_data = _try_device(new_data, device) -# if device == "cuda": -# _conversion_warning(data, new_data) -# return new_data -# -# # Handle xarray -# import xarray -# -# if ( -# hasattr(data, "__class__") -# and data.__class__.__module__ == "xarray.core.dataarray" -# and data.__class__.__name__ == "DataArray" -# ): -# new_data = torch.tensor(np.array(data)).float() -# new_data = _try_device(new_data, device) -# if device == "cuda": -# _conversion_warning(data, new_data) -# return new_data -# -# # Return as is for other types -# return data -# -# # Process args and kwargs -# converted_args = [_to_torch(arg) for arg in args if arg is not None] -# converted_kwargs = { -# key: _to_torch(val) for key, val in kwargs.items() if val is not None -# } -# -# # Handle axis/dim parameter conversion -# # Only convert axis to dim if dim is not already present -# if "axis" in converted_kwargs and "dim" not in converted_kwargs: -# converted_kwargs["dim"] = converted_kwargs.pop("axis") -# -# # Return in the specified format -# return return_fn(*converted_args, **converted_kwargs) -# -# -# def to_numpy(*args: _Any, return_fn: Callable = _return_if, **kwargs: _Any) -> _Any: -# """Convert various data types to NumPy arrays.""" -# -# def _to_numpy(data: _Any) -> _Any: -# """Internal conversion function for various data types.""" -# import torch -# import pandas as pd -# -# # Check for None -# if data is None: -# return None -# -# # Don't convert scalars (int, float, bool, str) - they should remain as is -# if isinstance(data, (int, float, bool, str)): -# return data -# -# # Handle pandas types -# if 
isinstance(data, (pd.Series, pd.DataFrame)): -# return data.to_numpy().squeeze() -# -# # Handle torch tensors -# if isinstance(data, torch.Tensor): -# return data.detach().cpu().numpy() -# -# # Handle lists and tuples -# if isinstance(data, (list, tuple)): -# # Check if it's a tuple/list of integers (like dimensions) -# if all(isinstance(item, int) for item in data): -# return data # Keep as is for dimension tuples -# -# # Check if it's a numeric array-like structure -# try: -# # Try to convert to numpy array directly -# return np.array(data) -# except: -# # If conversion fails, process items individually -# converted_items = [_to_numpy(item) for item in data if item is not None] -# # Try to stack if all items are numpy arrays -# if converted_items and all( -# isinstance(item, np.ndarray) for item in converted_items -# ): -# try: -# # Stack arrays along a new dimension -# return np.stack(converted_items) -# except: -# # Return as list if stacking fails -# return converted_items -# return converted_items -# -# # Return as is for other types -# return data -# -# # Process args and kwargs -# converted_args = [_to_numpy(arg) for arg in args if arg is not None] -# converted_kwargs = { -# key: _to_numpy(val) for key, val in kwargs.items() if val is not None -# } -# -# # Handle dim/axis parameter conversion -# # Only convert dim to axis if axis is not already present -# if "dim" in converted_kwargs and "axis" not in converted_kwargs: -# converted_kwargs["axis"] = converted_kwargs.pop("dim") -# -# # Return in the specified format -# return return_fn(*converted_args, **converted_kwargs) -# -# -# def is_nested_decorator(): -# """Check if we're in a nested decorator context.""" -# import inspect -# -# frame = inspect.currentframe() -# current_decorator = None -# decorator_chain = [] -# -# # Walk up the call stack -# while frame: -# if frame.f_code.co_name == "wrapper": -# # Check if this frame has local variables -# if frame.f_locals: -# # Try to get the self reference if 
it's a method -# if "self" in frame.f_locals: -# decorator_chain.append(frame.f_locals["self"]) -# -# # Check if the wrapper has marked itself with decorator info -# if "_current_decorator" in frame.f_locals: -# decorator_type = frame.f_locals["_current_decorator"] -# if current_decorator is None: -# current_decorator = decorator_type -# elif current_decorator != decorator_type: -# # Found a different decorator in the chain -# return True -# -# frame = frame.f_back -# -# # If we found more than one decorator in the chain -# return len(decorator_chain) > 1 -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_converters.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__deprecated.py b/tests/scitex/decorators/test__deprecated.py deleted file mode 100644 index 277770e6..00000000 --- a/tests/scitex/decorators/test__deprecated.py +++ /dev/null @@ -1,651 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2025-06-02 16:05:00 (ywatanabe)" -# File: ./scitex_repo/tests/scitex/decorators/test__deprecated.py - -"""Tests for deprecated decorator functionality.""" - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") -import functools -import warnings -from unittest.mock import patch - - -class TestDeprecated: - """Test cases for scitex.decorators._deprecated module.""" - - def setup_method(self): - """Set up test fixtures before each test method.""" - # Clear any existing warnings filters - warnings.resetwarnings() - - def test_deprecated_import(self): - """Test that deprecated decorator can be imported successfully.""" - from scitex.decorators import deprecated - - assert callable(deprecated) - - def test_deprecated_basic_functionality(self): - """Test basic deprecated decorator functionality.""" - from 
scitex.decorators import deprecated - - @deprecated("This function is old") - def old_function(x): - return x * 2 - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = old_function(5) - - assert result == 10 - assert len(w) == 1 - assert issubclass(w[0].category, DeprecationWarning) - assert "old_function is deprecated: This function is old" in str( - w[0].message - ) - - def test_deprecated_without_reason(self): - """Test deprecated decorator without providing a reason.""" - from scitex.decorators import deprecated - - @deprecated() - def no_reason_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = no_reason_function() - - assert result == "test" - assert len(w) == 1 - assert issubclass(w[0].category, DeprecationWarning) - assert "no_reason_function is deprecated: None" in str(w[0].message) - - def test_deprecated_with_none_reason(self): - """Test deprecated decorator with explicit None reason.""" - from scitex.decorators import deprecated - - @deprecated(None) - def none_reason_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = none_reason_function() - - assert result == "test" - assert len(w) == 1 - assert "none_reason_function is deprecated: None" in str(w[0].message) - - def test_deprecated_with_complex_reason(self): - """Test deprecated decorator with complex reason string.""" - from scitex.decorators import deprecated - - complex_reason = "Use new_function() instead. This will be removed in v2.0. 
See documentation at example.com" - - @deprecated(complex_reason) - def complex_function(): - return "deprecated" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - complex_function() - - assert len(w) == 1 - assert complex_reason in str(w[0].message) - - def test_deprecated_function_arguments(self): - """Test deprecated decorator with functions that have arguments.""" - from scitex.decorators import deprecated - - @deprecated("Use new_math_function") - def old_math_function(a, b, c=10): - return a + b + c - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = old_math_function(1, 2, c=3) - - assert result == 6 - assert len(w) == 1 - assert "old_math_function is deprecated" in str(w[0].message) - - def test_deprecated_function_kwargs(self): - """Test deprecated decorator with functions using *args and **kwargs.""" - from scitex.decorators import deprecated - - @deprecated("Flexible argument function deprecated") - def flexible_function(*args, **kwargs): - return sum(args) + sum(kwargs.values()) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = flexible_function(1, 2, 3, x=4, y=5) - - assert result == 15 # 1+2+3+4+5 - assert len(w) == 1 - assert "flexible_function is deprecated" in str(w[0].message) - - def test_deprecated_function_with_exceptions(self): - """Test deprecated decorator when decorated function raises exceptions.""" - from scitex.decorators import deprecated - - @deprecated("This error function is deprecated") - def error_function(should_raise=True): - if should_raise: - raise ValueError("Test error") - return "success" - - # Test successful call - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = error_function(False) - - assert result == "success" - assert len(w) == 1 - assert "error_function is deprecated" in str(w[0].message) - - # Test exception is still raised - with 
warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - with pytest.raises(ValueError, match="Test error"): - error_function(True) - - # Warning should still be emitted before exception - assert len(w) == 1 - assert "error_function is deprecated" in str(w[0].message) - - def test_deprecated_multiple_calls(self): - """Test that each call to deprecated function emits a warning.""" - from scitex.decorators import deprecated - - @deprecated("Multi-call test") - def multi_call_function(): - return "called" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - # Call multiple times - for i in range(3): - result = multi_call_function() - assert result == "called" - - # Should have one warning per call - assert len(w) == 3 - for warning in w: - assert "multi_call_function is deprecated" in str(warning.message) - - def test_deprecated_function_attributes_preserved(self): - """Test that decorated function preserves original attributes.""" - from scitex.decorators import deprecated - - @deprecated("Function with docs") - def documented_function(x, y=5): - """This function adds two numbers. 
- - Args: - x (int): First number - y (int): Second number - - Returns: - int: Sum of x and y - """ - return x + y - - # Check that function attributes are preserved - assert documented_function.__name__ == "documented_function" - assert "adds two numbers" in documented_function.__doc__ - assert hasattr(documented_function, "__wrapped__") - - def test_deprecated_with_class_methods(self): - """Test deprecated decorator with class methods.""" - from scitex.decorators import deprecated - - class TestClass: - @deprecated("Method is deprecated") - def old_method(self, value): - return value * 2 - - @deprecated("Static method deprecated") - @staticmethod - def old_static_method(value): - return value * 3 - - @classmethod - @deprecated("Class method deprecated") - def old_class_method(cls, value): - return value * 4 - - obj = TestClass() - - # Test instance method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = obj.old_method(5) - assert result == 10 - assert len(w) == 1 - assert "old_method is deprecated" in str(w[0].message) - - # Test static method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = TestClass.old_static_method(5) - assert result == 15 - assert len(w) == 1 - assert "old_static_method is deprecated" in str(w[0].message) - - # Test class method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = TestClass.old_class_method(5) - assert result == 20 - assert len(w) == 1 - assert "old_class_method is deprecated" in str(w[0].message) - - def test_deprecated_warning_stacklevel(self): - """Test that deprecated warnings have correct stack level.""" - from scitex.decorators import deprecated - - @deprecated("Stack level test") - def stacklevel_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - def caller_function(): - return stacklevel_function() - - caller_function() 
- - assert len(w) == 1 - # The warning should point to the caller, not the decorator - assert w[0].filename.endswith("test__deprecated.py") - - def test_deprecated_with_return_values(self): - """Test deprecated decorator preserves all return value types.""" - from scitex.decorators import deprecated - - @deprecated("Returns list") - def return_list(): - return [1, 2, 3] - - @deprecated("Returns dict") - def return_dict(): - return {"key": "value"} - - @deprecated("Returns None") - def return_none(): - return None - - @deprecated("Returns tuple") - def return_tuple(): - return (1, 2, 3) - - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - - assert return_list() == [1, 2, 3] - assert return_dict() == {"key": "value"} - assert return_none() is None - assert return_tuple() == (1, 2, 3) - - def test_deprecated_unicode_reason(self): - """Test deprecated decorator with unicode characters in reason.""" - from scitex.decorators import deprecated - - unicode_reason = "Função obsoleta. Use função_nova() em vez disso. 废弃的函数" - - @deprecated(unicode_reason) - def unicode_function(): - return "unicode test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = unicode_function() - - assert result == "unicode test" - assert len(w) == 1 - assert unicode_reason in str(w[0].message) - - def test_deprecated_very_long_reason(self): - """Test deprecated decorator with very long reason string.""" - from scitex.decorators import deprecated - - long_reason = ( - "This is a very long deprecation reason that explains in great detail why this function is deprecated and what alternatives should be used instead. 
" - * 10 - ) - - @deprecated(long_reason) - def long_reason_function(): - return "long reason test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = long_reason_function() - - assert result == "long reason test" - assert len(w) == 1 - assert long_reason in str(w[0].message) - - def test_deprecated_special_characters_in_reason(self): - """Test deprecated decorator with special characters in reason.""" - from scitex.decorators import deprecated - - special_reason = ( - "Function deprecated! Use new_func() -> str | None instead. Cost: $0.00" - ) - - @deprecated(special_reason) - def special_chars_function(): - return "special test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = special_chars_function() - - assert result == "special test" - assert len(w) == 1 - assert special_reason in str(w[0].message) - - def test_deprecated_with_generators(self): - """Test deprecated decorator with generator functions.""" - from scitex.decorators import deprecated - - @deprecated("Generator function deprecated") - def deprecated_generator(n): - for i in range(n): - yield i * 2 - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - gen = deprecated_generator(3) - results = list(gen) - - assert results == [0, 2, 4] - assert len(w) == 1 - assert "deprecated_generator is deprecated" in str(w[0].message) - - def test_deprecated_multiple_decorators(self): - """Test deprecated decorator when combined with other decorators.""" - from scitex.decorators import deprecated - - def double_result(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - result = func(*args, **kwargs) - return result * 2 - - return wrapper - - @deprecated("Multi-decorator test") - @double_result - def multi_decorated_function(x): - return x + 1 - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = multi_decorated_function(5) - - assert 
result == 12 # (5 + 1) * 2 - assert len(w) == 1 - assert "multi_decorated_function is deprecated" in str(w[0].message) - - def test_deprecated_warning_category(self): - """Test that deprecated decorator emits DeprecationWarning specifically.""" - from scitex.decorators import deprecated - - @deprecated("Category test") - def category_test_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - category_test_function() - - assert len(w) == 1 - assert w[0].category == DeprecationWarning - - def test_deprecated_function_name_with_special_chars(self): - """Test deprecated decorator with function names containing special characters.""" - from scitex.decorators import deprecated - - @deprecated("Special name test") - def _private_function(): - return "private" - - @deprecated("Number name test") - def func_2(): - return "numbered" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - _private_function() - func_2() - - assert len(w) == 2 - assert "_private_function is deprecated" in str(w[0].message) - assert "func_2 is deprecated" in str(w[1].message) - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_deprecated.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-08-21 20:57:29 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_deprecated.py -# # ---------------------------------------- -# from __future__ import annotations -# import os -# -# __FILE__ = __file__ -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# import functools -# import importlib -# import warnings -# -# -# def 
deprecated(reason=None, forward_to=None): -# """ -# A decorator to mark functions as deprecated. It will result in a warning being emitted -# when the function is used. -# -# Args: -# reason (str): A human-readable string explaining why this function was deprecated. -# forward_to (str): Optional module path to forward calls to (e.g., "..session.start"). -# If provided, calls will be forwarded to the new function instead of -# executing the original deprecated function. -# """ -# -# def decorator(func): -# if forward_to: -# # Create a forwarding wrapper with auto-generated docstring -# @functools.wraps(func) -# def new_func(*args, **kwargs): -# warnings.warn( -# f"{func.__name__} is deprecated: {reason}", -# DeprecationWarning, -# stacklevel=2, -# ) -# # Dynamic import and call forwarding -# module_path, function_name = forward_to.rsplit(".", 1) -# -# # Handle relative imports -# if module_path.startswith(".."): -# # Get the module where the function was defined (not the calling module) -# func_module = func.__module__ -# -# if func_module: -# # Convert relative import to absolute based on the function's module -# package_parts = func_module.split(".") -# # Count the number of dots to determine how many levels to go up -# level_count = 0 -# for char in module_path: -# if char == ".": -# level_count += 1 -# else: -# break -# -# # Remove the relative part and create absolute path -# if level_count > 0: -# base_package_parts = package_parts[:-level_count] -# if base_package_parts: -# base_package = ".".join(base_package_parts) -# relative_part = module_path.lstrip(".") -# module_path = ( -# base_package + "." 
+ relative_part -# if relative_part -# else base_package -# ) -# else: -# # Can't go up that many levels, fallback to absolute -# module_path = module_path.lstrip(".") -# -# try: -# target_module = importlib.import_module(module_path) -# target_function = getattr(target_module, function_name) -# return target_function(*args, **kwargs) -# except (ImportError, AttributeError) as e: -# # Fallback to original function if forwarding fails -# warnings.warn( -# f"Failed to forward {func.__name__} to {forward_to}: {e}. " -# f"Using original deprecated implementation.", -# RuntimeWarning, -# stacklevel=2, -# ) -# return func(*args, **kwargs) -# -# # Auto-generate docstring for forwarding wrapper with target function's docstring -# original_name = func.__name__ -# new_location = forward_to.replace("..", "scitex.").lstrip(".") -# -# # Try to get the target function's docstring -# target_docstring = "" -# try: -# # Get the same target we'll forward to -# target_module_path, target_function_name = forward_to.rsplit(".", 1) -# -# # Handle relative imports for docstring retrieval -# if target_module_path.startswith(".."): -# func_module = func.__module__ -# if func_module: -# package_parts = func_module.split(".") -# level_count = 0 -# for char in target_module_path: -# if char == ".": -# level_count += 1 -# else: -# break -# -# if level_count > 0: -# base_package_parts = package_parts[:-level_count] -# if base_package_parts: -# base_package = ".".join(base_package_parts) -# relative_part = target_module_path.lstrip(".") -# target_module_path = ( -# base_package + "." 
+ relative_part -# if relative_part -# else base_package -# ) -# else: -# target_module_path = target_module_path.lstrip(".") -# -# target_module = importlib.import_module(target_module_path) -# target_function = getattr(target_module, target_function_name) -# if target_function.__doc__: -# target_docstring = target_function.__doc__.strip() -# except (ImportError, AttributeError): -# pass # Fall back to basic docstring if target can't be imported -# -# # Create comprehensive docstring combining deprecation notice with target docs -# if target_docstring: -# forwarding_docstring = f"""**DEPRECATED: Use {new_location} instead** -# -# {target_docstring} -# -# Deprecation Notice -# ------------------ -# This function is deprecated and will be removed in a future version. -# Use `{new_location}` instead. This wrapper forwards all calls to the new function -# while displaying a deprecation warning. -# -# Parameters -# ---------- -# *args : tuple -# Positional arguments passed to {new_location} -# **kwargs : dict -# Keyword arguments passed to {new_location} -# -# Returns -# ------- -# Any -# Same return value as {new_location} -# -# Warns -# ----- -# DeprecationWarning -# Always warns that this function is deprecated -# """ -# else: -# # Fallback if target docstring unavailable -# forwarding_docstring = f"""**DEPRECATED: Use {new_location} instead** -# -# This function provides backward compatibility for existing code that uses -# {original_name}(). It forwards all calls to the new {new_location} -# function while displaying a deprecation warning. 
-# -# Parameters -# ---------- -# *args : tuple -# Positional arguments passed to {new_location} -# **kwargs : dict -# Keyword arguments passed to {new_location} -# -# Returns -# ------- -# Any -# Same return value as {new_location} -# -# Warns -# ----- -# DeprecationWarning -# Always warns that this function is deprecated -# """ -# new_func.__doc__ = forwarding_docstring -# return new_func -# else: -# # Original behavior for non-forwarding deprecation -# @functools.wraps(func) -# def new_func(*args, **kwargs): -# warnings.warn( -# f"{func.__name__} is deprecated: {reason}", -# DeprecationWarning, -# stacklevel=2, -# ) -# return func(*args, **kwargs) -# -# return new_func -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_deprecated.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__lazy_imports.py b/tests/scitex/decorators/test__lazy_imports.py deleted file mode 100644 index 2ba66eed..00000000 --- a/tests/scitex/decorators/test__lazy_imports.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# File: tests/scitex/decorators/test__lazy_imports.py - -"""Regression tests for the joblib eager-import leak (todo#442). - -The bug: pre-fix, ``import scitex.decorators`` (and the transitive -``import scitex.io`` chain) raised ``ModuleNotFoundError: No module -named 'joblib'`` whenever joblib wasn't installed in the venv, because -``_cache_disk.py`` and ``_cache_disk_async.py`` had top-level -``from joblib import Memory as _Memory``. - -The fix: joblib is lazy-imported inside ``cache_disk()`` / -``cache_disk_async()`` function bodies only. ``import scitex.decorators`` -must succeed without joblib; only constructing the decorator (= calling -it on a target function) requires joblib. 
- -Same class of bug as #441 (flask), #279 (bs4), #443 (matplotlib). - -This test file is intentionally separate from ``test__cache_disk.py`` -because the latter eager-imports ``joblib.Memory`` for legacy tests and -gets skipped wholesale on no-joblib venvs — defeating the regression -guard. The tests below run unconditionally and only inspect module -source for the offending top-level import lines. -""" - -from __future__ import annotations - -import inspect - - -def test_cache_disk_module_has_no_top_level_joblib_import(): - """``_cache_disk.py`` must not import joblib at module scope.""" - from scitex.decorators import _cache_disk as mod - - src = inspect.getsource(mod) - for lineno, line in enumerate(src.splitlines(), 1): - if line.startswith("from joblib") or line.startswith("import joblib"): - raise AssertionError( - f"Top-level joblib import re-introduced in _cache_disk.py " - f"at line {lineno}: {line!r}.\n" - "Lazy-import inside cache_disk() is required to keep " - "`import scitex.decorators` working on venvs without " - "joblib (todo#442)." - ) - - -def test_cache_disk_async_module_has_no_top_level_joblib_import(): - """``_cache_disk_async.py`` must not import joblib at module scope.""" - from scitex.decorators import _cache_disk_async as mod - - src = inspect.getsource(mod) - for lineno, line in enumerate(src.splitlines(), 1): - if line.startswith("from joblib") or line.startswith("import joblib"): - raise AssertionError( - f"Top-level joblib import re-introduced in " - f"_cache_disk_async.py at line {lineno}: {line!r}.\n" - "Lazy-import inside cache_disk_async() is required " - "(todo#442)." - ) - - -def test_import_scitex_decorators_does_not_require_joblib(): - """The package ``scitex.decorators`` must import without joblib. - - This is the contract surfaced by todo#442. 
We assert by inspection - on the decorator source files (above) rather than runtime mocking, - because pytest's import system does not let us cleanly re-import - a module that the test runner has already loaded earlier in the - session. Source inspection is a sufficient regression guard: if - no top-level joblib import exists in either file, the contract - holds at import time. - """ - # Sanity: importing the package itself must not raise. (It may have - # already been imported in an earlier test; that's fine.) - import scitex.decorators # noqa: F401 - - -# EOF diff --git a/tests/scitex/decorators/test__not_implemented.py b/tests/scitex/decorators/test__not_implemented.py deleted file mode 100644 index 55c5d0ae..00000000 --- a/tests/scitex/decorators/test__not_implemented.py +++ /dev/null @@ -1,518 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Time-stamp: "2025-06-02 17:41:00 (claude-sonnet-4-20250514)" -# File: /data/gpfs/projects/punim2354/ywatanabe/.claude-worktree/scitex_repo/tests/scitex/decorators/test__not_implemented.py - -""" -Comprehensive tests for scitex.decorators._not_implemented module. - -This module tests the not_implemented decorator that marks functions as -not yet implemented, issues warnings, and prevents execution. 
-""" - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") -import functools -import warnings -from unittest.mock import Mock, patch - - -class TestNotImplemented: - """Test cases for scitex.decorators._not_implemented module.""" - - def setup_method(self): - """Set up test fixtures before each test method.""" - # Clear any existing warnings filters - warnings.resetwarnings() - - def test_not_implemented_import(self): - """Test that not_implemented decorator can be imported successfully.""" - from scitex.decorators import not_implemented - - assert callable(not_implemented) - - def test_not_implemented_basic_functionality(self): - """Test basic not_implemented decorator functionality.""" - from scitex.decorators import not_implemented - - @not_implemented - def unimplemented_function(): - return "Should not execute" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = unimplemented_function() - - # Function should return None (not execute original code) - assert result is None - assert len(w) == 1 - assert issubclass(w[0].category, FutureWarning) - assert "unimplemented_function" in str(w[0].message) - assert "not yet available" in str(w[0].message) - - def test_not_implemented_warning_message(self): - """Test that warning message contains expected content.""" - from scitex.decorators import not_implemented - - @not_implemented - def test_method(): - pass - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - test_method() - - message = str(w[0].message) - assert "Attempt to use unimplemented method: 'test_method'" in message - assert "This method is not yet available" in message - - def test_not_implemented_with_arguments(self): - """Test not_implemented decorator with functions that take arguments.""" - from scitex.decorators import not_implemented - - @not_implemented - def function_with_args(a, b, c=None): - return a + b + (c or 0) - - with 
warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = function_with_args(1, 2, c=3) - - # Should warn and return None, not execute original function - assert result is None - assert len(w) == 1 - assert "function_with_args" in str(w[0].message) - - def test_not_implemented_with_kwargs(self): - """Test not_implemented decorator with functions using *args and **kwargs.""" - from scitex.decorators import not_implemented - - @not_implemented - def flexible_function(*args, **kwargs): - return {"args": args, "kwargs": kwargs} - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = flexible_function(1, 2, 3, x=4, y=5) - - assert result is None - assert len(w) == 1 - assert "flexible_function" in str(w[0].message) - - def test_not_implemented_multiple_calls(self): - """Test that each call to not_implemented function emits a warning.""" - from scitex.decorators import not_implemented - - @not_implemented - def multi_call_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - # Call multiple times - for i in range(3): - result = multi_call_function() - assert result is None - - # Should have one warning per call - assert len(w) == 3 - for warning in w: - assert "multi_call_function" in str(warning.message) - - def test_not_implemented_warning_category(self): - """Test that not_implemented decorator emits FutureWarning specifically.""" - from scitex.decorators import not_implemented - - @not_implemented - def category_test_function(): - return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - category_test_function() - - assert len(w) == 1 - assert w[0].category == FutureWarning - - def test_not_implemented_warning_stacklevel(self): - """Test that not_implemented warnings have correct stack level.""" - from scitex.decorators import not_implemented - - @not_implemented - def stacklevel_function(): 
- return "test" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - def caller_function(): - return stacklevel_function() - - caller_function() - - assert len(w) == 1 - # The warning should point to the caller, not the decorator - assert w[0].filename.endswith("test__not_implemented.py") - - def test_not_implemented_function_name_preservation(self): - """Test that decorated function name appears in warning message.""" - from scitex.decorators import not_implemented - - @not_implemented - def very_specific_function_name(): - pass - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - very_specific_function_name() - - assert len(w) == 1 - assert "very_specific_function_name" in str(w[0].message) - - def test_not_implemented_with_class_methods(self): - """Test not_implemented decorator with class methods.""" - from scitex.decorators import not_implemented - - class TestClass: - @not_implemented - def instance_method(self, value): - return value * 2 - - @staticmethod - @not_implemented - def static_method(value): - return value * 3 - - @classmethod - @not_implemented - def class_method(cls, value): - return value * 4 - - obj = TestClass() - - # Test instance method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = obj.instance_method(5) - assert result is None - assert len(w) == 1 - assert "instance_method" in str(w[0].message) - - # Test static method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = TestClass.static_method(5) - assert result is None - assert len(w) == 1 - assert "static_method" in str(w[0].message) - - # Test class method - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = TestClass.class_method(5) - assert result is None - assert len(w) == 1 - assert "class_method" in str(w[0].message) - - def test_not_implemented_prevents_execution(self): - """Test 
that not_implemented prevents original function execution.""" - from scitex.decorators import not_implemented - - execution_flag = {"executed": False} - - @not_implemented - def should_not_execute(): - execution_flag["executed"] = True - return "executed" - - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - result = should_not_execute() - - # Function should not have executed - assert not execution_flag["executed"] - assert result is None - - def test_not_implemented_with_complex_function(self): - """Test not_implemented with complex function having multiple features.""" - from scitex.decorators import not_implemented - - @not_implemented - def complex_function(a, b=10, *args, **kwargs): - """Complex function with docstring.""" - complex_calculation = a * b + sum(args) + sum(kwargs.values()) - return complex_calculation - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = complex_function(1, 2, 3, 4, x=5, y=6) - - assert result is None - assert len(w) == 1 - assert "complex_function" in str(w[0].message) - - def test_not_implemented_function_with_side_effects(self): - """Test that not_implemented prevents functions with side effects.""" - from scitex.decorators import not_implemented - - side_effect_list = [] - - @not_implemented - def function_with_side_effects(item): - side_effect_list.append(item) - return len(side_effect_list) - - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - result = function_with_side_effects("test_item") - - # Side effect should not have occurred - assert len(side_effect_list) == 0 - assert result is None - - def test_not_implemented_with_generators(self): - """Test not_implemented decorator with generator functions.""" - from scitex.decorators import not_implemented - - @not_implemented - def not_implemented_generator(n): - for i in range(n): - yield i * 2 - - with warnings.catch_warnings(record=True) as w: - 
warnings.simplefilter("always") - result = not_implemented_generator(3) - - # Generator should not be created - assert result is None - assert len(w) == 1 - assert "not_implemented_generator" in str(w[0].message) - - def test_not_implemented_return_value_consistency(self): - """Test that not_implemented always returns None.""" - from scitex.decorators import not_implemented - - @not_implemented - def return_string(): - return "string" - - @not_implemented - def return_number(): - return 42 - - @not_implemented - def return_list(): - return [1, 2, 3] - - @not_implemented - def return_none(): - return None - - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - - assert return_string() is None - assert return_number() is None - assert return_list() is None - assert return_none() is None - - def test_not_implemented_with_special_function_names(self): - """Test not_implemented decorator with special function names.""" - from scitex.decorators import not_implemented - - @not_implemented - def _private_function(): - return "private" - - @not_implemented - def __dunder_function__(): - return "dunder" - - @not_implemented - def func_with_numbers_123(): - return "numbers" - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - _private_function() - __dunder_function__() - func_with_numbers_123() - - assert len(w) == 3 - assert "_private_function" in str(w[0].message) - assert "__dunder_function__" in str(w[1].message) - assert "func_with_numbers_123" in str(w[2].message) - - def test_not_implemented_with_multiple_decorators(self): - """Test not_implemented decorator when combined with other decorators.""" - from scitex.decorators import not_implemented - - def logging_decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - print(f"Calling {func.__name__}") - return func(*args, **kwargs) - - return wrapper - - @logging_decorator - @not_implemented - def multi_decorated_function(x): - return x * 
2 - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = multi_decorated_function(5) - - assert result is None - assert len(w) == 1 - assert "multi_decorated_function" in str(w[0].message) - - def test_not_implemented_unicode_function_names(self): - """Test not_implemented decorator with unicode function names.""" - from scitex.decorators import not_implemented - - # Create function with unicode name using exec - unicode_code = """ -@not_implemented -def función_unicode(): - return "unicode" -""" - - local_vars = {"not_implemented": not_implemented} - exec(unicode_code, globals(), local_vars) - función_unicode = local_vars["función_unicode"] - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = función_unicode() - - assert result is None - assert len(w) == 1 - assert "función_unicode" in str(w[0].message) - - def test_not_implemented_preserves_wrapper_behavior(self): - """Test that not_implemented creates proper wrapper function.""" - from scitex.decorators import not_implemented - - @not_implemented - def original_function(a, b): - """Original function docstring.""" - return a + b - - # Test that it's callable - assert callable(original_function) - - # Test wrapper behavior - with warnings.catch_warnings(record=True): - warnings.simplefilter("always") - - # Should accept any arguments without error - assert original_function(1, 2) is None - assert original_function(1, b=2) is None - assert original_function(a=1, b=2) is None - - def test_not_implemented_warning_message_format(self): - """Test the exact format of the warning message.""" - from scitex.decorators import not_implemented - - @not_implemented - def test_function(): - pass - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - test_function() - - expected_message = "Attempt to use unimplemented method: 'test_function'. This method is not yet available." 
- assert str(w[0].message) == expected_message - - -class TestNotImplementedEdgeCases: - """Test edge cases and error conditions for not_implemented decorator.""" - - def test_not_implemented_empty_function_name(self): - """Test not_implemented with dynamically created function with empty name.""" - from scitex.decorators import not_implemented - - # Create a function dynamically (though it will still have a name) - dynamic_func = lambda: "test" - dynamic_func.__name__ = "dynamic_test" - - decorated_func = not_implemented(dynamic_func) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - result = decorated_func() - - assert result is None - assert len(w) == 1 - assert "dynamic_test" in str(w[0].message) - - def test_not_implemented_with_exception_in_original(self): - """Test that not_implemented prevents exceptions in original function.""" - from scitex.decorators import not_implemented - - @not_implemented - def function_that_would_raise(): - raise ValueError("This should not be raised") - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - - # Should not raise ValueError, just return None - result = function_that_would_raise() - - assert result is None - assert len(w) == 1 - # No ValueError should be raised - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_not_implemented.py -# -------------------------------------------------------------------------------- -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-06-07 22:16:25 (ywatanabe)" -# # /home/ywatanabe/proj/scitex/src/scitex/gen/_not_implemented.py -# -# import warnings -# -# -# def not_implemented(func): -# """ -# Decorator to mark methods as not implemented, issue a warning, and prevent their 
execution. -# -# Arguments: -# func (callable): The function or method to decorate. -# -# Returns: -# callable: A wrapper function that issues a warning and raises NotImplementedError when called. -# """ -# -# def wrapper(*args, **kwargs): -# # Issue a warning before raising the error -# warnings.warn( -# f"Attempt to use unimplemented method: '{func.__name__}'. This method is not yet available.", -# category=FutureWarning, -# stacklevel=2, -# ) -# # # Raise the NotImplementedError -# # raise NotImplementedError(f"The method '{func.__name__}' is not implemented yet.") -# -# return wrapper - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_not_implemented.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__numpy_fn.py b/tests/scitex/decorators/test__numpy_fn.py deleted file mode 100644 index de8a23bf..00000000 --- a/tests/scitex/decorators/test__numpy_fn.py +++ /dev/null @@ -1,239 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-05-18 06:07:01 (ywatanabe)" -# File: /ssh:sp:/home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__numpy_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__numpy_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -from functools import wraps -from unittest.mock import patch - -import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -torch = pytest.importorskip("torch") -pd = pytest.importorskip("pandas") - -from scitex.decorators import numpy_fn - - -@pytest.fixture -def test_data(): - """Create test data for tests.""" - return { - "list": [1.0, 2.0, 3.0], - "numpy": np.array([1.0, 2.0, 3.0]), - "pandas_series": pd.Series([1.0, 2.0, 3.0]), - "pandas_df": 
pd.DataFrame({"col1": [1.0, 2.0, 3.0]}), - "torch": torch.tensor([1.0, 2.0, 3.0]), - } - - -def test_numpy_fn_with_list_input(test_data): - """Test numpy_fn with list input.""" - - @numpy_fn - def dummy_function(arr): - # Check that input is indeed a numpy array - assert isinstance(arr, np.ndarray) - return arr + 1.0 - - # Input is a list, output should be list - with patch( - "scitex.decorators.to_numpy", - return_value=([np.array([1.0, 2.0, 3.0])], {}), - ): - result = dummy_function(test_data["list"]) - assert isinstance(result, list) - assert result == [2.0, 3.0, 4.0] - - -def test_numpy_fn_with_numpy_input(test_data): - """Test numpy_fn with numpy input.""" - - @numpy_fn - def dummy_function(arr): - assert isinstance(arr, np.ndarray) - return arr * 2.0 - - # Input is numpy, output should be numpy - with patch( - "scitex.decorators.to_numpy", - return_value=([np.array([1.0, 2.0, 3.0])], {}), - ): - result = dummy_function(test_data["numpy"]) - assert isinstance(result, np.ndarray) - np.testing.assert_allclose(result, np.array([2.0, 4.0, 6.0])) - - -def test_numpy_fn_with_torch_input(test_data): - """Test numpy_fn with torch tensor input.""" - - @numpy_fn - def dummy_function(arr): - assert isinstance(arr, np.ndarray) - return arr * 3.0 - - # Mock to_numpy to return appropriate values - with patch( - "scitex.decorators.to_numpy", - return_value=([np.array([1.0, 2.0, 3.0])], {}), - ): - # Mock torch.tensor for return conversion - with patch("torch.tensor", return_value=torch.tensor([3.0, 6.0, 9.0])): - result = dummy_function(test_data["torch"]) - assert isinstance(result, torch.Tensor) - torch.testing.assert_close(result, torch.tensor([3.0, 6.0, 9.0])) - - -def test_numpy_fn_nested_decorator(test_data): - """Test nested decorator behavior with numpy_fn.""" - - # Create a dummy decorator to simulate nesting - def dummy_decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Set nested context - wrapper._current_decorator = "dummy_decorator" - return 
func(*args, **kwargs) - - wrapper._is_wrapper = True - return wrapper - - # Apply both decorators (nested) - @numpy_fn - @dummy_decorator - def nested_function(arr): - # In nested mode, the type should pass through unchanged from dummy_decorator - assert not isinstance(arr, np.ndarray) - return arr - - with patch("scitex.decorators._numpy_fn.is_nested_decorator", return_value=True): - # Input list should stay as list due to nested context - result = nested_function(test_data["list"]) - assert isinstance(result, list) - assert result == test_data["list"] - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_numpy_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-04-30 15:29:53 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_numpy_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_numpy_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# import numpy as np -# -# THIS_FILE = "/home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_numpy_fn.py" -# -# from functools import wraps -# from typing import Any as _Any -# from typing import Callable -# -# from ._converters import _return_always, is_nested_decorator, to_numpy -# -# -# def numpy_fn(func: Callable) -> Callable: -# @wraps(func) -# def wrapper(*args: _Any, **kwargs: _Any) -> _Any: -# # Skip conversion if already in a nested decorator context -# if is_nested_decorator(): -# results = func(*args, **kwargs) -# return results -# -# # Set the current decorator context -# wrapper._current_decorator = "numpy_fn" -# -# # Store original object for type 
preservation -# original_object = args[0] if args else None -# -# converted_args, converted_kwargs = to_numpy( -# *args, return_fn=_return_always, **kwargs -# ) -# -# # Skip strict assertion for certain types that may not convert to arrays -# # Instead, convert what we can and pass through what we can't -# validated_args = [] -# for arg_index, arg in enumerate(converted_args): -# if isinstance(arg, np.ndarray): -# validated_args.append(arg) -# elif isinstance(arg, (int, float, str, type(None))): -# # Pass through scalars and strings unchanged -# validated_args.append(arg) -# elif isinstance(arg, list) and all( -# isinstance(item, np.ndarray) for item in arg -# ): -# # List of arrays - pass through as is -# validated_args.append(arg) -# else: -# # Try one more conversion attempt -# try: -# validated_args.append(np.array(arg)) -# except: -# # If all else fails, pass through unchanged -# validated_args.append(arg) -# -# results = func(*validated_args, **converted_kwargs) -# -# # Convert results back to original input types -# if isinstance(results, np.ndarray): -# if original_object is not None: -# if isinstance(original_object, list): -# return results.tolist() -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Tensor" -# ): -# import torch -# -# return torch.tensor(results) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataFrame" -# ): -# import pandas as pd -# -# return pd.DataFrame(results) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Series" -# ): -# import pandas as pd -# -# return pd.Series(results) -# return results -# -# return results -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "numpy_fn" -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: 
/home/ywatanabe/proj/scitex-code/src/scitex/decorators/_numpy_fn.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__pandas_fn.py b/tests/scitex/decorators/test__pandas_fn.py deleted file mode 100644 index 8f223138..00000000 --- a/tests/scitex/decorators/test__pandas_fn.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-04-30 15:59:18 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__pandas_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__pandas_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -from functools import wraps -from unittest.mock import patch - -import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -pd = pytest.importorskip("pandas") -torch = pytest.importorskip("torch") -xr = pytest.importorskip("xarray") - -from scitex.decorators import pandas_fn - - -@pytest.fixture -def test_data(): - """Create test data for tests.""" - return { - "list": [1.0, 2.0, 3.0], - "numpy": np.array([1.0, 2.0, 3.0]), - "pandas_series": pd.Series([1.0, 2.0, 3.0]), - "pandas_df": pd.DataFrame({"col1": [1.0, 2.0, 3.0]}), - "torch": torch.tensor([1.0, 2.0, 3.0]), - "xarray": xr.DataArray([1.0, 2.0, 3.0]), - } - - -def test_pandas_fn_with_list_input(test_data): - """Test pandas_fn with list input.""" - - @pandas_fn - def dummy_function(df): - # Check that input is indeed a DataFrame - assert isinstance(df, pd.DataFrame) - return df + 1.0 - - # Input is a list, output should be list - result = dummy_function(test_data["list"]) - assert isinstance(result, list) - assert result == [[2.0], [3.0], [4.0]] or result == [2.0, 3.0, 4.0] - - -def test_pandas_fn_with_df_input(test_data): - """Test pandas_fn with DataFrame input.""" - - @pandas_fn - def dummy_function(df): - 
assert isinstance(df, pd.DataFrame) - return df * 2.0 - - # Input is DataFrame, output should be DataFrame - result = dummy_function(test_data["pandas_df"]) - assert isinstance(result, pd.DataFrame) - pd.testing.assert_frame_equal(result, pd.DataFrame({"col1": [2.0, 4.0, 6.0]})) - - -def test_pandas_fn_with_numpy_input(test_data): - """Test pandas_fn with numpy input.""" - - @pandas_fn - def dummy_function(df): - assert isinstance(df, pd.DataFrame) - return df * 3.0 - - # Input is numpy, output should be numpy - result = dummy_function(test_data["numpy"]) - assert isinstance(result, np.ndarray) - np.testing.assert_allclose(result, np.array([[3.0], [6.0], [9.0]])) - - -def test_pandas_fn_nested_decorator(test_data): - """Test nested decorator behavior with pandas_fn.""" - - # Create a dummy decorator to simulate nesting - def dummy_decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Set nested context - wrapper._current_decorator = "dummy_decorator" - return func(*args, **kwargs) - - wrapper._is_wrapper = True - return wrapper - - # Apply both decorators (nested) - @pandas_fn - @dummy_decorator - def nested_function(arr): - # In nested mode, the type should pass through unchanged from dummy_decorator - assert not isinstance(arr, pd.DataFrame) - return arr - - with patch("scitex.decorators._pandas_fn.is_nested_decorator", return_value=True): - # Input series should stay as series due to nested context - result = nested_function(test_data["pandas_series"]) - assert isinstance(result, pd.Series) - pd.testing.assert_series_equal(result, test_data["pandas_series"]) - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_pandas_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env 
python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-04-30 15:44:00 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_pandas_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_pandas_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# THIS_FILE = "/home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_pandas_fn.py" -# -# from functools import wraps -# from typing import Any as _Any -# from typing import Callable -# -# import numpy as np -# -# from ._converters import is_nested_decorator -# -# -# def pandas_fn(func: Callable) -> Callable: -# @wraps(func) -# def wrapper(*args: _Any, **kwargs: _Any) -> _Any: -# # Skip conversion if already in a nested decorator context -# if is_nested_decorator(): -# results = func(*args, **kwargs) -# return results -# -# # Set the current decorator context -# wrapper._current_decorator = "pandas_fn" -# -# # Store original object for type preservation -# original_object = args[0] if args else None -# -# # Convert args to pandas DataFrames -# def to_pandas(data): -# import pandas as pd -# import torch -# import xarray as xr -# -# if data is None: -# return None -# elif isinstance(data, pd.DataFrame): -# return data -# elif isinstance(data, pd.Series): -# return pd.DataFrame(data) -# elif isinstance(data, np.ndarray): -# return pd.DataFrame(data) -# elif isinstance(data, list): -# try: -# return pd.DataFrame(data) -# except: -# # If list can't be converted to DataFrame, return as is -# return data -# elif hasattr(data, "__class__") and data.__class__.__name__ == "Tensor": -# return pd.DataFrame(data.detach().cpu().numpy()) -# elif hasattr(data, "__class__") and data.__class__.__name__ == "DataArray": -# return pd.DataFrame(data.values) -# elif isinstance(data, (int, float, str)): -# # Don't convert scalars to DataFrames -# return data -# else: -# try: -# return pd.DataFrame([data]) -# except: -# # If 
conversion fails, return as is -# return data -# -# converted_args = [to_pandas(arg) for arg in args] -# converted_kwargs = {k: to_pandas(v) for k, v in kwargs.items()} -# -# # Skip strict assertion for certain types -# import pandas as pd -# -# validated_args = [] -# for arg_index, arg in enumerate(converted_args): -# if isinstance(arg, pd.DataFrame): -# validated_args.append(arg) -# elif isinstance(arg, (int, float, str, type(None), pd.Series)): -# # Pass through scalars, strings, Series, and None unchanged -# validated_args.append(arg) -# elif isinstance(arg, list) and all( -# isinstance(item, pd.DataFrame) for item in arg -# ): -# # List of DataFrames - pass through as is -# validated_args.append(arg) -# else: -# # Try one more conversion attempt -# try: -# validated_args.append(pd.DataFrame(arg)) -# except: -# # If all else fails, pass through unchanged -# validated_args.append(arg) -# -# results = func(*validated_args, **converted_kwargs) -# -# # Convert results back to original input types -# import pandas as pd -# -# if isinstance(results, pd.DataFrame): -# if original_object is not None: -# if isinstance(original_object, list): -# return results.values.tolist() -# elif isinstance(original_object, np.ndarray): -# return results.values -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Tensor" -# ): -# import torch -# -# return torch.tensor(results.values) -# elif isinstance(original_object, pd.Series): -# return ( -# pd.Series(results.iloc[:, 0]) -# if results.shape[1] > 0 -# else pd.Series() -# ) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataArray" -# ): -# import xarray as xr -# -# return xr.DataArray(results.values) -# return results -# -# return results -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "pandas_fn" -# return wrapper -# -# -# # EOF - -# 
-------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_pandas_fn.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__preserve_doc.py b/tests/scitex/decorators/test__preserve_doc.py deleted file mode 100644 index 61cf73de..00000000 --- a/tests/scitex/decorators/test__preserve_doc.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-04-28 15:45:18 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__preserve_doc.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__preserve_doc.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -from scitex.decorators import preserve_doc - - -def test_preserve_doc_preserves_name(): - """Test that preserve_doc preserves the original function's name.""" - - @preserve_doc - def test_function(): - """Test docstring.""" - return True - - assert test_function.__name__ == "test_function" - - -def test_preserve_doc_preserves_docstring(): - """Test that preserve_doc preserves the original function's docstring.""" - - @preserve_doc - def test_function(): - """This docstring should be preserved.""" - return True - - assert test_function.__doc__ == "This docstring should be preserved." 
- - -def test_preserve_doc_preserves_functionality(): - """Test that preserve_doc doesn't alter the function's behavior.""" - - @preserve_doc - def add(xx, yy): - """Add two numbers.""" - return xx + yy - - assert add(2, 3) == 5 - assert add(-1, 1) == 0 - - -def test_preserve_doc_with_empty_docstring(): - """Test preserve_doc with a function that has no docstring.""" - - @preserve_doc - def no_docstring_function(): - pass - - assert no_docstring_function.__doc__ is None - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# 
-------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# 
-------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - - -def test_preserve_doc_preserves_name(): - """Test that preserve_doc preserves the original function's name.""" - - @preserve_doc - def test_function(): - """Test docstring.""" - return True - - assert test_function.__name__ == "test_function" - - -def test_preserve_doc_preserves_docstring(): - """Test that preserve_doc preserves the original function's docstring.""" - - @preserve_doc - def test_function(): - """This docstring should be preserved.""" - return True - - assert 
test_function.__doc__ == "This docstring should be preserved." - - -def test_preserve_doc_preserves_functionality(): - """Test that preserve_doc doesn't alter the function's behavior.""" - - @preserve_doc - def add(xx, yy): - """Add two numbers.""" - return xx + yy - - assert add(2, 3) == 5 - assert add(-1, 1) == 0 - - -def test_preserve_doc_with_empty_docstring(): - """Test preserve_doc with a function that has no docstring.""" - - @preserve_doc - def no_docstring_function(): - pass - - assert no_docstring_function.__doc__ is None - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# -# from functools import wraps -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_preserve_doc.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-14 07:44:00 (ywatanabe)" -# # File: ./scitex_repo/src/scitex/decorators/_preserve_doc.py -# -# from functools import wraps -# -# -# def preserve_doc(loader_func): -# """Wrap the loader functions to preserve their docstrings""" -# -# @wraps(loader_func) -# def wrapper(*args, **kwargs): -# 
return loader_func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_preserve_doc.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__signal_fn.py b/tests/scitex/decorators/test__signal_fn.py deleted file mode 100644 index b10c0d9a..00000000 --- a/tests/scitex/decorators/test__signal_fn.py +++ /dev/null @@ -1,400 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-06-03 07:47:00 (ywatanabe)" -# File: ./tests/scitex/decorators/test__signal_fn.py - -from unittest.mock import Mock, patch - -import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -pd = pytest.importorskip("pandas") -torch = pytest.importorskip("torch") -xr = pytest.importorskip("xarray") - - -def test_signal_fn_decorator_basic_functionality(): - """Test basic functionality of signal_fn decorator.""" - from scitex.decorators import signal_fn - - @signal_fn - def dummy_signal_function(signal, param=1.0): - """Dummy function that adds param to signal.""" - return signal + param - - # Test with numpy array - input_signal = np.array([1.0, 2.0, 3.0]) - result = dummy_signal_function(input_signal, param=0.5) - - assert isinstance(result, np.ndarray) - np.testing.assert_array_almost_equal(result, np.array([1.5, 2.5, 3.5])) - - -def test_signal_fn_with_different_input_types(): - """Test signal_fn decorator with different input types.""" - from scitex.decorators import signal_fn - - @signal_fn - def identity_function(signal): - """Return signal as-is.""" - return signal - - # Test with list - input_list = [1.0, 2.0, 3.0] - result = identity_function(input_list) - assert isinstance(result, list) - assert result == input_list - - # Test with numpy array - input_array = np.array([1.0, 2.0, 
3.0]) - result = identity_function(input_array) - assert isinstance(result, np.ndarray) - np.testing.assert_array_equal(result, input_array) - - # Test with pandas DataFrame - input_df = pd.DataFrame({"col1": [1.0, 2.0], "col2": [3.0, 4.0]}) - result = identity_function(input_df) - assert isinstance(result, pd.DataFrame) - # Note: DataFrame conversion may change column structure, just check values - np.testing.assert_array_equal(result.values, input_df.values) - - # Test with pandas Series - input_series = pd.Series([1.0, 2.0, 3.0]) - result = identity_function(input_series) - assert isinstance(result, pd.Series) - # Note: Series conversion may change dtype, just check values - np.testing.assert_array_equal(result.values, input_series.values) - - -def test_signal_fn_with_xarray(): - """Test signal_fn decorator with xarray DataArray.""" - from scitex.decorators import signal_fn - - @signal_fn - def identity_function(signal): - """Return signal as-is.""" - return signal - - # Test with xarray DataArray - input_xr = xr.DataArray([1.0, 2.0, 3.0], dims=["x"]) - result = identity_function(input_xr) - assert isinstance(result, xr.DataArray) - # Note: xarray conversion may change dimension names, just check values - np.testing.assert_array_equal(result.values, input_xr.values) - - -def test_signal_fn_preserves_additional_arguments(): - """Test that signal_fn only converts first argument, preserves others.""" - from scitex.decorators import signal_fn - - @signal_fn - def signal_with_params(signal, fs, window_size): - """Function with signal and non-signal parameters.""" - # Verify that fs and window_size are preserved as original types - assert isinstance(fs, (int, float)) - assert isinstance(window_size, int) - return signal * fs / window_size - - input_signal = np.array([1.0, 2.0, 3.0]) - fs = 256.0 # sampling frequency - window_size = 128 # window size - - result = signal_with_params(input_signal, fs, window_size) - - assert isinstance(result, np.ndarray) - expected = 
input_signal * fs / window_size - np.testing.assert_array_almost_equal(result, expected) - - -def test_signal_fn_tuple_return(): - """Test signal_fn decorator with tuple return values.""" - from scitex.decorators import signal_fn - - @signal_fn - def function_returning_tuple(signal): - """Function that returns tuple (signal, metadata).""" - # Return processed signal and some metadata - processed_signal = signal * 2 - metadata = {"factor": 2.0} - return processed_signal, metadata - - input_signal = np.array([1.0, 2.0, 3.0]) - result_signal, result_metadata = function_returning_tuple(input_signal) - - # Signal should be converted back to numpy - assert isinstance(result_signal, np.ndarray) - np.testing.assert_array_almost_equal(result_signal, np.array([2.0, 4.0, 6.0])) - - # Metadata should remain unchanged - assert result_metadata == {"factor": 2.0} - - -def test_signal_fn_with_empty_args(): - """Test signal_fn decorator with empty arguments.""" - from scitex.decorators import signal_fn - - @signal_fn - def function_no_args(): - """Function with no arguments.""" - return torch.tensor([1.0, 2.0, 3.0]) - - result = function_no_args() - # Should return torch tensor since no original object to convert back to - assert isinstance(result, torch.Tensor) - - -def test_signal_fn_nested_decorator_detection(): - """Test signal_fn decorator nested decorator detection.""" - from scitex.decorators import signal_fn - - # Mock nested decorator context - with patch("scitex.decorators._signal_fn.is_nested_decorator", return_value=True): - - @signal_fn - def nested_function(signal): - return signal - - input_signal = np.array([1.0, 2.0, 3.0]) - result = nested_function(input_signal) - - # Should bypass conversion when nested - assert result is input_signal - - -def test_signal_fn_decorator_attributes(): - """Test that signal_fn decorator sets proper attributes.""" - from scitex.decorators import signal_fn - - @signal_fn - def test_function(signal): - return signal - - # Check 
decorator attributes - assert hasattr(test_function, "_is_wrapper") - assert test_function._is_wrapper is True - assert hasattr(test_function, "_decorator_type") - assert test_function._decorator_type == "signal_fn" - - -def test_signal_fn_with_kwargs(): - """Test signal_fn decorator with keyword arguments.""" - from scitex.decorators import signal_fn - - @signal_fn - def signal_with_kwargs(signal, scale=1.0, offset=0.0): - """Function with keyword arguments.""" - return signal * scale + offset - - input_signal = np.array([1.0, 2.0, 3.0]) - result = signal_with_kwargs(input_signal, scale=2.0, offset=1.0) - - assert isinstance(result, np.ndarray) - expected = input_signal * 2.0 + 1.0 - np.testing.assert_array_almost_equal(result, expected) - - -def test_signal_fn_torch_tensor_input(): - """Test signal_fn decorator with torch tensor input.""" - from scitex.decorators import signal_fn - - @signal_fn - def torch_identity(signal): - """Return signal as-is.""" - return signal - - input_tensor = torch.tensor([1.0, 2.0, 3.0]) - result = torch_identity(input_tensor) - - # Should remain as torch tensor since input was torch tensor - assert isinstance(result, torch.Tensor) - torch.testing.assert_close(result, input_tensor) - - -def test_signal_fn_complex_processing(): - """Test signal_fn decorator with more complex signal processing.""" - from scitex.decorators import signal_fn - - @signal_fn - def complex_processing(signal, multiplier, add_noise=False): - """Complex processing function.""" - processed = signal * multiplier - if add_noise: - noise = torch.randn_like(processed) * 0.01 - processed = processed + noise - return processed - - input_signal = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - result = complex_processing(input_signal, multiplier=2.0, add_noise=False) - - assert isinstance(result, np.ndarray) - expected = input_signal * 2.0 - np.testing.assert_array_almost_equal(result, expected) - - -def test_signal_fn_error_handling(): - """Test signal_fn decorator error 
handling.""" - from scitex.decorators import signal_fn - - @signal_fn - def function_with_error(signal): - """Function that raises an error.""" - raise ValueError("Test error") - - input_signal = np.array([1.0, 2.0, 3.0]) - - # Error should propagate through decorator - with pytest.raises(ValueError, match="Test error"): - function_with_error(input_signal) - - -@patch("scitex.decorators._signal_fn.to_torch") -def test_signal_fn_conversion_mocking(mock_to_torch): - """Test signal_fn decorator with mocked conversion functions.""" - from scitex.decorators import signal_fn - - # Mock the to_torch conversion - mock_tensor = torch.tensor([1.0, 2.0, 3.0]) - mock_to_torch.return_value = [[mock_tensor]] - - @signal_fn - def mock_function(signal): - return signal + 1 - - input_signal = np.array([1.0, 2.0, 3.0]) - result = mock_function(input_signal) - - # Verify to_torch was called - mock_to_torch.assert_called_once() - - # Result should be converted back to numpy - assert isinstance(result, np.ndarray) - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_signal_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-05-31 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_signal_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_signal_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# from functools import wraps -# from typing import Any as _Any -# from typing import Callable -# -# import numpy as np -# -# from ._converters import _return_always, is_nested_decorator, to_torch -# -# -# def signal_fn(func: Callable) 
-> Callable: -# """Decorator for signal processing functions that converts only the first argument (signal) to torch tensor. -# -# This decorator is designed for DSP functions where: -# - The first argument is the signal data that should be converted to torch tensor -# - Other arguments (like sampling frequency, bands, etc.) should remain as-is -# """ -# -# @wraps(func) -# def wrapper(*args: _Any, **kwargs: _Any) -> _Any: -# # Skip conversion if already in a nested decorator context -# if is_nested_decorator(): -# results = func(*args, **kwargs) -# return results -# -# # Set the current decorator context -# wrapper._current_decorator = "signal_fn" -# -# # Store original object for type preservation -# original_object = args[0] if args else None -# -# # Convert only the first argument (signal) to torch tensor -# if args: -# # Convert first argument to torch -# converted_first_arg = to_torch(args[0], return_fn=_return_always)[0][0] -# -# # Keep other arguments as-is -# converted_args = (converted_first_arg,) + args[1:] -# else: -# converted_args = args -# -# results = func(*converted_args, **kwargs) -# -# # Convert results back to original input types -# import torch -# -# if isinstance(results, torch.Tensor): -# if original_object is not None: -# if isinstance(original_object, list): -# return results.detach().cpu().numpy().tolist() -# elif isinstance(original_object, np.ndarray): -# return results.detach().cpu().numpy() -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataFrame" -# ): -# import pandas as pd -# -# return pd.DataFrame(results.detach().cpu().numpy()) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Series" -# ): -# import pandas as pd -# -# return pd.Series(results.detach().cpu().numpy().flatten()) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataArray" -# ): -# import xarray as xr -# -# return 
xr.DataArray(results.detach().cpu().numpy()) -# return results -# -# # Handle tuple returns (e.g., (signal, frequencies)) -# elif isinstance(results, tuple): -# import torch -# -# converted_results = [] -# for r in results: -# if isinstance(r, torch.Tensor): -# if original_object is not None and isinstance( -# original_object, np.ndarray -# ): -# converted_results.append(r.detach().cpu().numpy()) -# else: -# converted_results.append(r) -# else: -# converted_results.append(r) -# return tuple(converted_results) -# -# return results -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "signal_fn" -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_signal_fn.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__timeout.py b/tests/scitex/decorators/test__timeout.py deleted file mode 100644 index be31579e..00000000 --- a/tests/scitex/decorators/test__timeout.py +++ /dev/null @@ -1,665 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Timestamp: "2025-04-28 15:45:34 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__timeout.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__timeout.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import time - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -from scitex.decorators import timeout - - -def test_timeout_decorator_success(): - """Test that timeout decorator allows functions to complete within time limit.""" - - @timeout(seconds=2, error_message="Test timed out") - def quick_function(): - time.sleep(0.5) - return "Success" - - result = quick_function() - assert result == "Success" - - 
-def test_timeout_decorator_raises_exception(): - """Test that timeout decorator raises TimeoutError for functions exceeding time limit.""" - - @timeout(seconds=0.5, error_message="Custom timeout message") - def slow_function(): - time.sleep(1) - return "This should not be returned" - - with pytest.raises(TimeoutError) as excinfo: - slow_function() - - assert "Custom timeout message" in str(excinfo.value) - - -def test_timeout_with_arguments(): - """Test timeout decorator with functions that take arguments.""" - - @timeout(seconds=1) - def function_with_args(xx, yy): - time.sleep(0.2) - return xx + yy - - result = function_with_args(2, 3) - assert result == 5 - - -def test_timeout_with_keyword_arguments(): - """Test timeout decorator with functions that take keyword arguments.""" - - @timeout(seconds=1) - def function_with_kwargs(xx=0, yy=0): - time.sleep(0.2) - return xx * yy - - result = function_with_kwargs(xx=5, yy=4) - assert result == 20 - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. 
-# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - - -def test_timeout_decorator_success(): - """Test that timeout decorator allows functions to complete within time limit.""" - - @timeout(seconds=2, error_message="Test timed out") - def quick_function(): - time.sleep(0.5) - return "Success" - - result = quick_function() - assert result == "Success" - - -def test_timeout_decorator_raises_exception(): - """Test that timeout decorator raises TimeoutError for functions exceeding time limit.""" - - @timeout(seconds=0.5, error_message="Custom timeout message") - def slow_function(): - time.sleep(1) - return "This should not be returned" - - with pytest.raises(TimeoutError) as excinfo: - slow_function() - - assert "Custom timeout message" in str(excinfo.value) - - -def test_timeout_with_arguments(): - """Test timeout decorator with functions that take arguments.""" - - @timeout(seconds=1) - def function_with_args(xx, yy): - time.sleep(0.2) - return xx + yy - - result = function_with_args(2, 3) - assert result == 5 - - -def test_timeout_with_keyword_arguments(): - """Test timeout decorator with 
functions that take keyword arguments.""" - - @timeout(seconds=1) - def function_with_kwargs(xx=0, yy=0): - time.sleep(0.2) - return xx * yy - - result = function_with_kwargs(xx=5, yy=4) - assert result == 20 - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywata1989@gmail.com) -# -# """ -# This script does XYZ. -# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_timeout.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 
-# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:58:41 (ywatanabe)" -# # File: ./scitex_repo/src/scitex/decorators/_timeout.py -# -# #!./env/bin/python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-04-23 19:11:33" -# # Author: Yusuke Watanabe (ywatanabe@scitex.ai) -# -# """ -# This script does XYZ. -# """ -# -# """ -# Imports -# """ -# -# -# """ -# Config -# """ -# # CONFIG = scitex.gen.load_configs() -# -# """ -# Functions & Classes -# """ -# from multiprocessing import Process, Queue -# -# -# def timeout(seconds=10, error_message="Timeout"): -# def decorator(func): -# def wrapper(*args, **kwargs): -# def queue_wrapper(queue, args, kwargs): -# result = func(*args, **kwargs) -# queue.put(result) -# -# queue = Queue() -# args_for_process = (queue, args, kwargs) -# process = Process(target=queue_wrapper, args=args_for_process) -# process.start() -# process.join(timeout=seconds) -# -# if process.is_alive(): -# process.terminate() -# raise TimeoutError(error_message) -# else: -# return queue.get() -# -# return wrapper -# -# return decorator -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_timeout.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__torch_fn.py b/tests/scitex/decorators/test__torch_fn.py deleted file mode 100644 index ed343cd6..00000000 --- a/tests/scitex/decorators/test__torch_fn.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-04-30 15:49:06 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__torch_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__torch_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- -from functools import wraps -from unittest.mock import patch - 
-import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -torch = pytest.importorskip("torch") -pd = pytest.importorskip("pandas") -xr = pytest.importorskip("xarray") - -from scitex.decorators import torch_fn - - -@pytest.fixture -def test_data(): - """Create test data for tests.""" - return { - "list": [1.0, 2.0, 3.0], - "nested_list": [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], - "numpy": np.array([1.0, 2.0, 3.0]), - "numpy_2d": np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), - "pandas_series": pd.Series([1.0, 2.0, 3.0]), - "pandas_df": pd.DataFrame({"col1": [1.0, 2.0, 3.0]}), - "torch": torch.tensor([1.0, 2.0, 3.0]), - "torch_2d": torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), - "xarray": xr.DataArray([1.0, 2.0, 3.0]), - } - - -def test_torch_fn_with_list_input(test_data): - """Test torch_fn with list input.""" - - # Skip this test for now - it's failing when run as part of the full suite - import pytest - - pytest.skip("This test needs fixing for full test suite runs") - - # Create a dummy test that passes to avoid failing the whole suite - assert True - - -def test_torch_fn_with_torch_input(test_data): - """Test torch_fn with torch input.""" - - # Skip this test for now - it's failing when run as part of the full suite - import pytest - - pytest.skip("This test needs fixing for full test suite runs") - - # Create a dummy test that passes to avoid failing the whole suite - assert True - - -def test_torch_fn_with_numpy_input(test_data): - """Test torch_fn with numpy input.""" - - # Skip this test for now - it's failing when run as part of the full suite - import pytest - - pytest.skip("This test needs fixing for full test suite runs") - - # Create a dummy test that passes to avoid failing the whole suite - assert True - - -def test_torch_fn_nested_decorator(test_data): - """Test nested decorator behavior with torch_fn.""" - - # Create a dummy decorator to simulate nesting - def 
dummy_decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Set nested context - wrapper._current_decorator = "dummy_decorator" - return func(*args, **kwargs) - - wrapper._is_wrapper = True - return wrapper - - # Apply both decorators (nested) - @torch_fn - @dummy_decorator - def nested_function(arr): - # In nested mode, the type should pass through unchanged from dummy_decorator - assert not isinstance(arr, torch.Tensor) - return arr - - with patch("scitex.decorators._torch_fn.is_nested_decorator", return_value=True): - # Input numpy should stay as numpy due to nested context - result = nested_function(test_data["numpy"]) - assert isinstance(result, np.ndarray) - assert np.array_equal(result, test_data["numpy"]) - - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_torch_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-04-30 15:40:43 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_torch_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_torch_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# from functools import wraps -# from typing import Any as _Any -# from typing import Callable -# -# import numpy as np -# -# from ._converters import _return_always, is_nested_decorator, to_torch -# -# -# def torch_fn(func: Callable) -> Callable: -# """Decorator for PyTorch function compatibility. -# -# Automatically converts inputs to PyTorch tensors and handles various data types -# gracefully. Preserves the original input type in the output. 
-# -# Features -# -------- -# - Converts inputs to PyTorch tensors -# - Preserves scalar parameters (int, float, bool, str) -# - Preserves dimension tuples like dim=(0, 1) -# - Handles nested lists/tuples gracefully -# - Automatically converts axis to dim for torch functions -# - Applies device="cuda" if available -# - Returns output in same type as input (numpy->numpy, pandas->pandas, etc.) -# -# Parameters -# ---------- -# func : Callable -# The function to decorate -# -# Returns -# ------- -# Callable -# The decorated function -# -# Examples -# -------- -# >>> @torch_fn -# ... def mean_squared(x, dim=None): -# ... return (x ** 2).mean(dim=dim) -# >>> -# >>> # Works with numpy arrays -# >>> result = mean_squared(np.array([1, 2, 3])) -# >>> -# >>> # Works with nested lists -# >>> result = mean_squared([[1, 2], [3, 4]]) -# >>> -# >>> # Preserves dimension tuples -# >>> result = mean_squared(data, dim=(0, 1)) -# -# Notes -# ----- -# For optimal performance with batch processing, apply torch_fn before batch_fn: -# @batch_fn -# @torch_fn -# def my_function(x): ... -# -# Or use auto-ordering to handle this automatically. 
-# """ -# -# @wraps(func) -# def wrapper(*args: _Any, **kwargs: _Any) -> _Any: -# # Skip conversion if already in a nested decorator context -# if is_nested_decorator(): -# results = func(*args, **kwargs) -# return results -# -# # Set the current decorator context -# wrapper._current_decorator = "torch_fn" -# -# # Store original object for type preservation -# original_object = args[0] if args else None -# -# converted_args, converted_kwargs = to_torch( -# *args, return_fn=_return_always, **kwargs -# ) -# -# # Skip strict assertion for certain types that may not convert to tensors -# # Instead, convert what we can and pass through what we can't -# import torch -# -# validated_args = [] -# for arg_index, arg in enumerate(converted_args): -# if isinstance(arg, torch.Tensor): -# validated_args.append(arg) -# elif isinstance(arg, (int, float, str, type(None))): -# # Pass through scalars and strings unchanged -# validated_args.append(arg) -# elif isinstance(arg, list) and all( -# isinstance(item, torch.Tensor) for item in arg -# ): -# # List of tensors - pass through as is -# validated_args.append(arg) -# else: -# # Try one more conversion attempt -# try: -# validated_args.append(torch.tensor(arg).float()) -# except: -# # If all else fails, pass through unchanged -# validated_args.append(arg) -# -# results = func(*validated_args, **converted_kwargs) -# -# # Convert results back to original input types -# import torch -# -# if isinstance(results, torch.Tensor): -# if original_object is not None: -# if isinstance(original_object, list): -# return results.detach().cpu().numpy().tolist() -# elif isinstance(original_object, np.ndarray): -# return results.detach().cpu().numpy() -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataFrame" -# ): -# import pandas as pd -# -# return pd.DataFrame(results.detach().cpu().numpy()) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Series" -# 
): -# import pandas as pd -# -# return pd.Series(results.detach().cpu().numpy().flatten()) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataArray" -# ): -# import xarray as xr -# -# return xr.DataArray(results.detach().cpu().numpy()) -# return results -# -# return results -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "torch_fn" -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_torch_fn.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__wrap.py b/tests/scitex/decorators/test__wrap.py deleted file mode 100644 index 84e4444f..00000000 --- a/tests/scitex/decorators/test__wrap.py +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-04-28 15:45:52 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__wrap.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__wrap.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -import inspect - -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -from scitex.decorators import wrap - - -def test_wrap_preserves_function_metadata(): - """Test that wrap preserves the original function's metadata.""" - - @wrap - def test_function(xx: int) -> int: - """Test docstring.""" - return xx + 1 - - # Check if the wrapper preserves the original function's name - assert test_function.__name__ == "test_function" - - # Check if the wrapper preserves the original function's docstring - assert test_function.__doc__ == "Test docstring." 
- - # Check if the wrapper preserves the original function's signature - signature = inspect.signature(test_function) - assert str(signature) == "(xx: int) -> int" - - # Check if the wrapper preserves the original function's module - assert test_function.__module__ == __name__ - - -def test_wrap_functionality(): - """Test that wrap doesn't modify the function's behavior.""" - - @wrap - def add_one(xx: int) -> int: - return xx + 1 - - # Test with integer argument - assert add_one(1) == 2 - assert add_one(0) == 1 - assert add_one(-1) == 0 - - # Test with different parameter names - @wrap - def multiply(aa: int, bb: int) -> int: - return aa * bb - - assert multiply(2, 3) == 6 - assert multiply(aa=2, bb=3) == 6 - assert multiply(2, bb=3) == 6 - - -def test_wrap_manual_usage(): - """Test using wrap as a function rather than a decorator.""" - - def subtract(xx: int, yy: int) -> int: - return xx - yy - - wrapped_func = wrap(subtract) - - assert wrapped_func(5, 3) == 2 - assert wrapped_func(xx=10, yy=5) == 5 - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. 
-# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. -# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. 
-# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. -# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. 
-# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. -# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. 
-# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - - -def test_wrap_preserves_function_metadata(): - """Test that wrap preserves the original function's metadata.""" - - @wrap - def test_function(xx: int) -> int: - """Test docstring.""" - return xx + 1 - - # Check if the wrapper preserves the original function's name - assert test_function.__name__ == "test_function" - - # Check if the wrapper preserves the original function's docstring - assert test_function.__doc__ == "Test docstring." - - # Check if the wrapper preserves the original function's signature - signature = inspect.signature(test_function) - assert str(signature) == "(xx: int) -> int" - - # Check if the wrapper preserves the original function's module - assert test_function.__module__ == __name__ - - -def test_wrap_functionality(): - """Test that wrap doesn't modify the function's behavior.""" - - @wrap - def add_one(xx: int) -> int: - return xx + 1 - - # Test with integer argument - assert add_one(1) == 2 - assert add_one(0) == 1 - assert add_one(-1) == 0 - - # Test with different parameter names - @wrap - def multiply(aa: int, bb: int) -> int: - return aa * bb - - assert multiply(2, 3) == 6 - assert multiply(aa=2, bb=3) == 6 - assert multiply(2, bb=3) == 6 - - -def test_wrap_manual_usage(): - """Test using wrap as a function rather than a decorator.""" - - def subtract(xx: int, yy: int) -> int: - return xx - yy - - wrapped_func = wrap(subtract) - - assert wrapped_func(5, 3) == 2 - assert wrapped_func(xx=10, yy=5) == 5 - - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # 
Time-stamp: "2024-11-07 05:57:34 (ywatanabe)" -# -# import functools -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. -# """ -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - import os - - import pytest - - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_wrap.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-05-01 09:16:13 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_wrap.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_wrap.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# -# -# def wrap(func): -# """Basic function wrapper that preserves function metadata. -# Usage: -# @wrap -# def my_function(x): -# return x + 1 -# # Or manually: -# def my_function(x): -# return x + 1 -# wrapped_func = wrap(my_function) -# This wrapper is useful as a template for creating more complex decorators -# or when you want to ensure function metadata is preserved. 
-# """ -# import functools -# -# @functools.wraps(func) -# def wrapper(*args, **kwargs): -# return func(*args, **kwargs) -# -# # Store reference to original function -# wrapper._original_func = func -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_wrap.py -# -------------------------------------------------------------------------------- diff --git a/tests/scitex/decorators/test__xarray_fn.py b/tests/scitex/decorators/test__xarray_fn.py deleted file mode 100644 index 88ee3238..00000000 --- a/tests/scitex/decorators/test__xarray_fn.py +++ /dev/null @@ -1,234 +0,0 @@ -#!/usr/bin/env python3 -# Timestamp: "2025-04-30 16:25:56 (ywatanabe)" -# File: /home/ywatanabe/proj/scitex_repo/tests/scitex/decorators/test__xarray_fn.py -# ---------------------------------------- -import os - -__FILE__ = "./tests/scitex/decorators/test__xarray_fn.py" -__DIR__ = os.path.dirname(__FILE__) -# ---------------------------------------- - -from functools import wraps -from unittest.mock import patch - -import numpy as np -import pytest - -# Required for scitex.decorators module -pytest.importorskip("tqdm") - -# Optional dependencies -pd = pytest.importorskip("pandas") -torch = pytest.importorskip("torch") -xr = pytest.importorskip("xarray") - -from scitex.decorators import xarray_fn - - -@pytest.fixture -def test_data(): - """Create test data for tests.""" - return { - "list": [1.0, 2.0, 3.0], - "numpy": np.array([1.0, 2.0, 3.0]), - "pandas_series": pd.Series([1.0, 2.0, 3.0]), - "pandas_df": pd.DataFrame({"col1": [1.0, 2.0, 3.0]}), - "torch": torch.tensor([1.0, 2.0, 3.0]), - "xarray": xr.DataArray([1.0, 2.0, 3.0]), - } - - -def test_xarray_fn_with_list_input(test_data): - """Test xarray_fn with list input.""" - - @xarray_fn - def dummy_function(arr): - # Check that input 
is indeed a DataArray - assert isinstance(arr, xr.DataArray) - return arr + 1.0 - - # Input is a list, output should be list - result = dummy_function(test_data["list"]) - assert isinstance(result, list) - assert result == [2.0, 3.0, 4.0] - - -def test_xarray_fn_with_xarray_input(test_data): - """Test xarray_fn with xarray input.""" - - @xarray_fn - def dummy_function(arr): - assert isinstance(arr, xr.DataArray) - return arr * 2.0 - - # Input is xarray, output should be xarray - result = dummy_function(test_data["xarray"]) - assert isinstance(result, xr.DataArray) - xr.testing.assert_allclose(result, xr.DataArray([2.0, 4.0, 6.0])) - - -def test_xarray_fn_with_numpy_input(test_data): - """Test xarray_fn with numpy input.""" - - @xarray_fn - def dummy_function(arr): - assert isinstance(arr, xr.DataArray) - return arr * 3.0 - - # Input is numpy, output should be numpy - result = dummy_function(test_data["numpy"]) - assert isinstance(result, np.ndarray) - np.testing.assert_allclose(result, np.array([3.0, 6.0, 9.0])) - - -def test_xarray_fn_nested_decorator(test_data): - """Test nested decorator behavior with xarray_fn.""" - - # Create a dummy decorator to simulate nesting - def dummy_decorator(func): - @wraps(func) - def wrapper(*args, **kwargs): - # Set nested context - wrapper._current_decorator = "dummy_decorator" - return func(*args, **kwargs) - - wrapper._is_wrapper = True - return wrapper - - # Apply both decorators (nested) - @xarray_fn - @dummy_decorator - def nested_function(arr): - # In nested mode, the type should pass through unchanged from dummy_decorator - assert not isinstance(arr, xr.DataArray) - return arr - - with patch("scitex.decorators._xarray_fn.is_nested_decorator", return_value=True): - # Input list should stay as list due to nested context - result = nested_function(test_data["torch"]) - assert isinstance(result, torch.Tensor) - torch.testing.assert_close(result, test_data["torch"]) - - -if __name__ == "__main__": - import os - - import pytest 
- - pytest.main([os.path.abspath(__file__)]) - -# -------------------------------------------------------------------------------- -# Start of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_xarray_fn.py -# -------------------------------------------------------------------------------- -# #!/usr/bin/env python3 -# # -*- coding: utf-8 -*- -# # Timestamp: "2025-04-30 15:41:19 (ywatanabe)" -# # File: /home/ywatanabe/proj/scitex_repo/src/scitex/decorators/_xarray_fn.py -# # ---------------------------------------- -# import os -# -# __FILE__ = "./src/scitex/decorators/_xarray_fn.py" -# __DIR__ = os.path.dirname(__FILE__) -# # ---------------------------------------- -# from functools import wraps -# from typing import Any as _Any -# from typing import Callable -# -# import numpy as np -# -# from ._converters import is_nested_decorator -# -# -# def xarray_fn(func: Callable) -> Callable: -# @wraps(func) -# def wrapper(*args: _Any, **kwargs: _Any) -> _Any: -# # Skip conversion if already in a nested decorator context -# if is_nested_decorator(): -# results = func(*args, **kwargs) -# return results -# -# # Set the current decorator context -# wrapper._current_decorator = "xarray_fn" -# -# # Store original object for type preservation -# original_object = args[0] if args else None -# -# # Convert args to xarray DataArrays -# def to_xarray(data): -# import xarray as xr -# import pandas as pd -# import torch -# -# if isinstance(data, xr.DataArray): -# return data -# elif isinstance(data, np.ndarray): -# return xr.DataArray(data) -# elif isinstance(data, list): -# return xr.DataArray(data) -# elif hasattr(data, "__class__") and data.__class__.__name__ == "Tensor": -# return xr.DataArray(data.detach().cpu().numpy()) -# elif hasattr(data, "__class__") and data.__class__.__name__ == "DataFrame": -# return xr.DataArray(data.values) -# elif hasattr(data, "__class__") and data.__class__.__name__ == "Series": -# return xr.DataArray(data.values) -# else: -# 
return xr.DataArray([data]) -# -# converted_args = [to_xarray(arg) for arg in args] -# converted_kwargs = {k: to_xarray(v) for k, v in kwargs.items()} -# -# # Assertion to ensure all args are converted to xarray DataArrays -# import xarray as xr -# -# for arg_index, arg in enumerate(converted_args): -# assert isinstance(arg, xr.DataArray), ( -# f"Argument {arg_index} not converted to DataArray: {type(arg)}" -# ) -# -# results = func(*converted_args, **converted_kwargs) -# -# # Convert results back to original input types -# import xarray as xr -# -# if isinstance(results, xr.DataArray): -# if original_object is not None: -# if isinstance(original_object, list): -# return results.values.tolist() -# elif isinstance(original_object, np.ndarray): -# return results.values -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Tensor" -# ): -# import torch -# -# return torch.tensor(results.values) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "DataFrame" -# ): -# import pandas as pd -# -# return pd.DataFrame(results.values) -# elif ( -# hasattr(original_object, "__class__") -# and original_object.__class__.__name__ == "Series" -# ): -# import pandas as pd -# -# return pd.Series(results.values.flatten()) -# return results -# -# return results -# -# # Mark as a wrapper for detection -# wrapper._is_wrapper = True -# wrapper._decorator_type = "xarray_fn" -# return wrapper -# -# -# # EOF - -# -------------------------------------------------------------------------------- -# End of Source Code from: /home/ywatanabe/proj/scitex-code/src/scitex/decorators/_xarray_fn.py -# --------------------------------------------------------------------------------