diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index 3ce603c3ed2..de821b70469 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -21,6 +21,7 @@
 import torchvision.transforms.v2 as transforms

 from common_utils import (
+    assert_close,
     assert_equal,
     cache,
     cpu_and_cuda,
@@ -41,7 +42,6 @@
 )

 from torch import nn
-from torch.testing import assert_close
 from torch.utils._pytree import tree_flatten, tree_map
 from torch.utils.data import DataLoader, default_collate
 from torchvision import tv_tensors
@@ -5552,24 +5552,34 @@ def test_kernel_image(self, mean, std, device):
     @pytest.mark.parametrize("device", cpu_and_cuda())
     def test_kernel_image_inplace(self, device):
-        input = make_image_tensor(dtype=torch.float32, device=device)
-        input_version = input._version
+        inpt = make_image_tensor(dtype=torch.float32, device=device)
+        input_version = inpt._version

-        output_out_of_place = F.normalize_image(input, mean=self.MEAN, std=self.STD)
-        assert output_out_of_place.data_ptr() != input.data_ptr()
-        assert output_out_of_place is not input
+        output_out_of_place = F.normalize_image(inpt, mean=self.MEAN, std=self.STD)
+        assert output_out_of_place.data_ptr() != inpt.data_ptr()
+        assert output_out_of_place is not inpt

-        output_inplace = F.normalize_image(input, mean=self.MEAN, std=self.STD, inplace=True)
-        assert output_inplace.data_ptr() == input.data_ptr()
+        output_inplace = F.normalize_image(inpt, mean=self.MEAN, std=self.STD, inplace=True)
+        assert output_inplace.data_ptr() == inpt.data_ptr()
         assert output_inplace._version > input_version
-        assert output_inplace is input
+        assert output_inplace is inpt

         assert_equal(output_inplace, output_out_of_place)

     def test_kernel_video(self):
         check_kernel(F.normalize_video, make_video(dtype=torch.float32), mean=self.MEAN, std=self.STD)

-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image, make_video])
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image_tensor,
+            make_image,
+            make_video,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
     def test_functional(self, make_input):
         check_functional(F.normalize, make_input(dtype=torch.float32), mean=self.MEAN, std=self.STD)

@@ -5579,9 +5589,16 @@ def test_functional(self, make_input):
             (F.normalize_image, torch.Tensor),
             (F.normalize_image, tv_tensors.Image),
             (F.normalize_video, tv_tensors.Video),
+            pytest.param(
+                F._misc._normalize_image_cvcuda,
+                None,
+                marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA"),
+            ),
         ],
     )
     def test_functional_signature(self, kernel, input_type):
+        if kernel is F._misc._normalize_image_cvcuda:
+            input_type = _import_cvcuda().Tensor
         check_functional_kernel_signature_match(F.normalize, kernel=kernel, input_type=input_type)

     def test_functional_error(self):
@@ -5595,9 +5612,9 @@ def test_functional_error(self):
         with pytest.raises(ValueError, match="std evaluated to zero, leading to division by zero"):
             F.normalize_image(make_image(dtype=torch.float32), mean=self.MEAN, std=std)

-    def _sample_input_adapter(self, transform, input, device):
+    def _sample_input_adapter(self, transform, inpt, device):
         adapted_input = {}
-        for key, value in input.items():
+        for key, value in inpt.items():
             if isinstance(value, PIL.Image.Image):
                 # normalize doesn't support PIL images
                 continue
@@ -5607,7 +5624,17 @@ def _sample_input_adapter(self, transform, input, device):
             adapted_input[key] = value
         return adapted_input

-    @pytest.mark.parametrize("make_input", [make_image_tensor, make_image, make_video])
@pytest.mark.parametrize("make_input", [make_image_tensor, make_image, make_video]) + @pytest.mark.parametrize( + "make_input", + [ + make_image_tensor, + make_image, + make_video, + pytest.param( + make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") + ), + ], + ) def test_transform(self, make_input): check_transform( transforms.Normalize(mean=self.MEAN, std=self.STD), @@ -5622,14 +5649,33 @@ def _reference_normalize_image(self, image, *, mean, std): @pytest.mark.parametrize(("mean", "std"), MEANS_STDS) @pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.float64]) + @pytest.mark.parametrize( + "make_input", + [ + make_image, + pytest.param( + make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") + ), + ], + ) @pytest.mark.parametrize("fn", [F.normalize, transform_cls_to_functional(transforms.Normalize)]) - def test_correctness_image(self, mean, std, dtype, fn): - image = make_image(dtype=dtype) + def test_correctness_image(self, mean, std, dtype, make_input, fn): + if make_input == make_image_cvcuda and dtype != torch.float32: + pytest.skip("CVCUDA only supports float32 for normalize") + + image = make_input(dtype=dtype) actual = fn(image, mean=mean, std=std) + + if make_input == make_image_cvcuda: + image = F.cvcuda_to_tensor(image)[0].cpu() + expected = self._reference_normalize_image(image, mean=mean, std=std) - assert_equal(actual, expected) + if make_input == make_image_cvcuda: + assert_close(actual, expected, rtol=0, atol=1e-6) + else: + assert_equal(actual, expected) class TestClampBoundingBoxes: diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index 305149c87b1..f15a9e3c62a 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,6 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform +from torchvision.transforms.v2.functional._utils import _is_cvcuda_available, _is_cvcuda_tensor from ._utils import ( _parse_labels_getter, @@ -21,6 +22,9 @@ ) +CVCUDA_AVAILABLE = _is_cvcuda_available() + + # TODO: do we want/need to expose this? 
 class Identity(Transform):
     def transform(self, inpt: Any, params: dict[str, Any]) -> Any:
@@ -160,6 +164,9 @@ class Normalize(Transform):

     _v1_transform_cls = _transforms.Normalize

+    if CVCUDA_AVAILABLE:
+        _transformed_types = Transform._transformed_types + (_is_cvcuda_tensor,)
+
     def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):
         super().__init__()
         self.mean = list(mean)
diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py
index bb6051b4e61..e803aa49c60 100644
--- a/torchvision/transforms/v2/_utils.py
+++ b/torchvision/transforms/v2/_utils.py
@@ -16,7 +16,7 @@

 from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size  # noqa: F401
 from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
-from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
+from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT, _is_cvcuda_tensor


 def _setup_number_or_seq(arg: int | float | Sequence[int | float], name: str) -> Sequence[float]:
@@ -182,7 +182,7 @@ def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]:
     chws = {
         tuple(get_dimensions(inpt))
         for inpt in flat_inputs
-        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video))
+        if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video, _is_cvcuda_tensor))
     }
     if not chws:
         raise TypeError("No image or video was found in the sample")
@@ -207,6 +207,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]:
                 tv_tensors.Mask,
                 tv_tensors.BoundingBoxes,
                 tv_tensors.KeyPoints,
+                _is_cvcuda_tensor,
             ),
         )
     }
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 6b8f19f12f4..af03ad018d4 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -51,6 +51,16 @@ def get_dimensions_video(video: torch.Tensor) -> list[int]:
     return get_dimensions_image(video)


+def get_dimensions_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:
+    # CV-CUDA tensors are always in NHWC layout,
+    # while get_dimensions returns CHW
+    return [image.shape[3], image.shape[1], image.shape[2]]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_dimensions, cvcuda.Tensor)(get_dimensions_image_cvcuda)
+
+
 def get_num_channels(inpt: torch.Tensor) -> int:
     if torch.jit.is_scripting():
         return get_num_channels_image(inpt)
@@ -87,6 +97,16 @@ def get_num_channels_video(video: torch.Tensor) -> int:
 get_image_num_channels = get_num_channels


+def get_num_channels_image_cvcuda(image: "cvcuda.Tensor") -> int:
+    # CV-CUDA tensors are always in NHWC layout,
+    # so the channel count C is the last dimension
+    return image.shape[3]
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(get_num_channels, cvcuda.Tensor)(get_num_channels_image_cvcuda)
+
+
 def get_size(inpt: torch.Tensor) -> list[int]:
     if torch.jit.is_scripting():
         return get_size_image(inpt)
@@ -125,7 +145,7 @@ def get_size_image_cvcuda(image: "cvcuda.Tensor") -> list[int]:


 if CVCUDA_AVAILABLE:
-    _get_size_image_cvcuda = _register_kernel_internal(get_size, cvcuda.Tensor)(get_size_image_cvcuda)
+    _register_kernel_internal(get_size, _import_cvcuda().Tensor)(get_size_image_cvcuda)


 @_register_kernel_internal(get_size, tv_tensors.Video, tv_tensor_wrapper=False)
diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py
index daf263df046..b55dc465456 100644
--- a/torchvision/transforms/v2/functional/_misc.py
+++ b/torchvision/transforms/v2/functional/_misc.py
@@ -1,5 +1,5 @@
 import math
-from typing import Optional
+from typing import Optional, TYPE_CHECKING

 import PIL.Image
 import torch
@@ -13,7 +13,14 @@

 from ._meta import _convert_bounding_box_format
-from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor
+from ._utils import _get_kernel, _import_cvcuda, _is_cvcuda_available, _register_kernel_internal, is_pure_tensor
+
+CVCUDA_AVAILABLE = _is_cvcuda_available()
+
+if TYPE_CHECKING:
+    import cvcuda  # type: ignore[import-not-found]
+if CVCUDA_AVAILABLE:
+    cvcuda = _import_cvcuda()  # noqa: F811


 def normalize(
@@ -72,6 +79,44 @@ def normalize_video(video: torch.Tensor, mean: list[float], std: list[float], in
     return normalize_image(video, mean, std, inplace=inplace)


+def _normalize_image_cvcuda(
+    image: "cvcuda.Tensor",
+    mean: list[float],
+    std: list[float],
+    inplace: bool = False,
+) -> "cvcuda.Tensor":
+    cvcuda = _import_cvcuda()
+    if inplace:
+        raise ValueError("Inplace normalization is not supported for CVCUDA.")
+
+    # CV-CUDA supports signed int and float tensors, while torchvision only
+    # supports uint and float. CV-CUDA doesn't expose float16 yet, so only
+    # check for F32; add float16 once it is exposed in CV-CUDA.
+    if not (image.dtype == cvcuda.Type.F32):
+        raise ValueError(f"Input tensor should be a float tensor. Got {image.dtype}.")
+
+    channels = image.shape[3]
+    if isinstance(mean, float | int):
+        mean = [mean] * channels
+    elif len(mean) != channels:
+        raise ValueError(f"Mean should have {channels} elements. Got {len(mean)}.")
+    if isinstance(std, float | int):
+        std = [std] * channels
+    elif len(std) != channels:
+        raise ValueError(f"Std should have {channels} elements. Got {len(std)}.")
+
+    mt = torch.as_tensor(mean, dtype=torch.float32).reshape(1, 1, 1, channels).cuda()
+    st = torch.as_tensor(std, dtype=torch.float32).reshape(1, 1, 1, channels).cuda()
+    mean_cv = cvcuda.as_tensor(mt, cvcuda.TensorLayout.NHWC)
+    std_cv = cvcuda.as_tensor(st, cvcuda.TensorLayout.NHWC)
+
+    return cvcuda.normalize(image, base=mean_cv, scale=std_cv, flags=cvcuda.NormalizeFlags.SCALE_IS_STDDEV)
+
+
+if CVCUDA_AVAILABLE:
+    _register_kernel_internal(normalize, _import_cvcuda().Tensor)(_normalize_image_cvcuda)
+
+
 def gaussian_blur(inpt: torch.Tensor, kernel_size: list[int], sigma: Optional[list[float]] = None) -> torch.Tensor:
     """See :class:`~torchvision.transforms.v2.GaussianBlur` for details."""
     if torch.jit.is_scripting():
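
Usage sketch (not part of the patch): a minimal end-to-end example of the CV-CUDA normalize path this diff adds. It assumes the cvcuda package and a CUDA device are available; the mean/std values are just example (ImageNet) statistics, and cvcuda.as_tensor / F.cvcuda_to_tensor mirror the conversions used in the tests above.

    import torch
    import torchvision.transforms.v2 as transforms
    import torchvision.transforms.v2.functional as F

    import cvcuda  # type: ignore[import-not-found]

    # Wrap a batched NHWC float32 CUDA tensor as a cvcuda.Tensor
    # (assumed zero-copy: the cvcuda.Tensor shares the CUDA buffer).
    img = torch.rand(1, 224, 224, 3, dtype=torch.float32, device="cuda")
    cv_img = cvcuda.as_tensor(img, cvcuda.TensorLayout.NHWC)

    mean = [0.485, 0.456, 0.406]  # example values only
    std = [0.229, 0.224, 0.225]

    # Functional path: F.normalize dispatches to _normalize_image_cvcuda
    # through the kernel registered in this patch.
    out = F.normalize(cv_img, mean=mean, std=std)

    # Transform path: Normalize accepts cvcuda.Tensor because the patch
    # extends its _transformed_types when CV-CUDA is available.
    out = transforms.Normalize(mean=mean, std=std)(cv_img)

    # Convert back to torch for inspection; indexing [0] drops the batch
    # dimension, as in test_correctness_image above.
    out_torch = F.cvcuda_to_tensor(out)[0].cpu()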