@@ -4044,11 +4044,22 @@ def test_make_params(self, sigma):
40444044            ((1, 26, 28), (23, 23), 1.7),
40454045        ],
40464046    )
4047-    @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.float16])
4047+    @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32, torch.float64, torch.float16])
40484048 @pytest .mark .parametrize ("device" , cpu_and_cuda ())
4049- def test_functional_image_correctness (self , dimensions , kernel_size , sigma , dtype , device ):
4049+    @pytest.mark.parametrize(
4050+        "input_type",
4051+        [
4052+            tv_tensors.Image,
4053+            pytest.param(
4054+                "cvcuda.Tensor", marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="CVCUDA not available")
4055+            ),
4056+        ],
4057+    )
4058+    def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtype, device, input_type):
40504059        if dtype is torch.float16 and device == "cpu":
40514060            pytest.skip("The CPU implementation of float16 on CPU differs from opencv")
4061+        if (dtype != torch.float32 and dtype != torch.uint8) and input_type == "cvcuda.Tensor":
4062+            pytest.skip("CVCUDA does not support non-float32 or uint8 dtypes for gaussian blur")
40524063
40534064        num_channels, height, width = dimensions
40544065
@@ -4068,45 +4079,17 @@ def test_functional_image_correctness(self, dimensions, kernel_size, sigma, dtyp
40684079            device=device,
40694080        )
40704081
4071-        actual = F.gaussian_blur_image(image, kernel_size=kernel_size, sigma=sigma)
4072-
4073-        torch.testing.assert_close(actual, expected, rtol=0, atol=1)
4074-
4075-    @pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
4076-    @needs_cuda
4077-    @pytest.mark.parametrize(
4078-        ("dimensions", "kernel_size", "sigma"),
4079-        [
4080-            ((10, 12), (3, 3), 0.8),
4081-            ((10, 12), (3, 3), 0.5),
4082-            ((10, 12), (3, 5), 0.8),
4083-            ((10, 12), (3, 5), 0.5),
4084-            ((26, 28), (23, 23), 1.7),
4085-        ],
4086-    )
4087-    @pytest.mark.parametrize("color_space", ["RGB", "GRAY"])
4088-    @pytest.mark.parametrize("batch_dims", [(1,), (2,), (4,)])
4089-    @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32])
4090-    def test_functional_cvcuda_correctness(self, dimensions, kernel_size, sigma, color_space, batch_dims, dtype):
4091-        height, width = dimensions
4082+        if input_type == "cvcuda.Tensor":
4083+            image = image.unsqueeze(0)
4084+            image = F.to_cvcuda_tensor(image)
40924085
4093-        image_tensor = make_image(
4094-            size=(height, width), color_space=color_space, batch_dims=batch_dims, dtype=dtype, device="cuda"
4095-        )
4096-        image_cvcuda = F.to_cvcuda_tensor(image_tensor)
4086+        actual = F.gaussian_blur(image, kernel_size=kernel_size, sigma=sigma)
40974087
4098-        expected = F.gaussian_blur_image(image_tensor, kernel_size=kernel_size, sigma=sigma)
4099-        actual = F._misc._gaussian_blur_cvcuda(image_cvcuda, kernel_size=kernel_size, sigma=sigma)
4100-        actual_torch = F.cvcuda_to_tensor(actual)
4088+        if input_type == "cvcuda.Tensor":
4089+            actual = F.cvcuda_to_tensor(actual)
4090+            actual = actual.squeeze(0).to(device=device)
41014091
4102-        if dtype.is_floating_point:
4103-            # floating point precision differences between torch and cvcuda are present
4104-            # observed greatest absolute error is 0.3
4105-            # most likely stemming from different implementation
4106-            torch.testing.assert_close(actual_torch, expected, rtol=0, atol=0.3)
4107-        else:
4108-            # uint8/16 gaussians can differ by up to max-value, most likely an overflow issue
4109-            torch.testing.assert_close(actual_torch, expected, rtol=0, atol=get_max_value(dtype))
4092+        torch.testing.assert_close(actual, expected, rtol=0, atol=1)
41104093
41114094
41124095class TestGaussianNoise:
0 commit comments