Commit 5d5c436

simplify and improve crop testing for cvcuda
1 parent: e4d5f37

File tree: 1 file changed (+44, -28)

test/test_transforms_v2.py (44 additions, 28 deletions)

@@ -3484,10 +3484,26 @@ def test_functional_signature(self, kernel, input_type):
         check_functional_kernel_signature_match(F.crop, kernel=kernel, input_type=input_type)
 
     @pytest.mark.parametrize("kwargs", CORRECTNESS_CROP_KWARGS)
-    def test_functional_image_correctness(self, kwargs):
-        image = make_image(self.INPUT_SIZE, dtype=torch.uint8, device="cpu")
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
+    def test_functional_image_correctness(self, kwargs, make_input):
+        image = make_input(self.INPUT_SIZE, dtype=torch.uint8, device="cpu")
 
         actual = F.crop(image, **kwargs)
+
+        if make_input == make_image_cvcuda:
+            actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+            actual = actual.squeeze(0)
+            image = F.cvcuda_to_tensor(image).to(device="cpu")
+            image = image.squeeze(0)
+
         expected = F.to_image(F.crop(F.to_pil_image(image), **kwargs))
 
         assert_equal(actual, expected)
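
The first hunk folds the CV-CUDA path into the existing functional test by parametrizing over an input factory and skipping the CV-CUDA case when the library is absent. Below is a minimal, self-contained sketch of that pytest pattern; the factory names, image shape, and test body are placeholders rather than torchvision's helpers, and it only assumes the CV-CUDA Python bindings import as cvcuda.

# Illustrative only: the parametrize + skipif pattern used in the hunk above,
# with placeholder factories instead of torchvision's make_image / make_image_cvcuda.
import pytest
import torch

try:
    import cvcuda  # noqa: F401  # assumption: the CV-CUDA Python bindings import as "cvcuda"
    CVCUDA_AVAILABLE = True
except ImportError:
    CVCUDA_AVAILABLE = False


def make_tensor_image():
    # stand-in for make_image: a random uint8 CHW image
    return torch.randint(0, 256, (3, 17, 11), dtype=torch.uint8)


def make_cvcuda_image():
    # stand-in for make_image_cvcuda; a real factory would wrap the data in a CV-CUDA tensor
    return make_tensor_image()


@pytest.mark.parametrize(
    "make_input",
    [
        make_tensor_image,
        pytest.param(
            make_cvcuda_image, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
        ),
    ],
)
def test_spatial_size_is_preserved(make_input):
    image = make_input()
    assert image.shape[-2:] == (17, 11)

With this layout each factory gets its own test id, so the CV-CUDA variant reports as skipped, rather than silently missing, when the dependency is unavailable.
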
@@ -3575,7 +3591,16 @@ def test_transform_pad_if_needed(self):
         padding_mode=["constant", "edge", "reflect", "symmetric"],
     )
     @pytest.mark.parametrize("seed", list(range(5)))
-    def test_transform_image_correctness(self, param, value, seed):
+    @pytest.mark.parametrize(
+        "make_input",
+        [
+            make_image,
+            pytest.param(
+                make_image_cvcuda, marks=pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
+            ),
+        ],
+    )
+    def test_transform_image_correctness(self, param, value, seed, make_input):
         kwargs = {param: value}
         if param != "size":
             # 1. size is required
@@ -3586,41 +3611,32 @@ def test_transform_image_correctness(self, param, value, seed):
 
         transform = transforms.RandomCrop(pad_if_needed=True, **kwargs)
 
-        image = make_image(self.INPUT_SIZE)
+        will_pad = False
+        if kwargs["size"][0] > self.INPUT_SIZE[0] or kwargs["size"][1] > self.INPUT_SIZE[1]:
+            will_pad = True
+
+        image = make_input(self.INPUT_SIZE)
 
         with freeze_rng_state():
             torch.manual_seed(seed)
             actual = transform(image)
 
             torch.manual_seed(seed)
-            expected = F.to_image(transform(F.to_pil_image(image)))
 
-            assert_equal(actual, expected)
+            if make_input == make_image_cvcuda:
+                actual = F.cvcuda_to_tensor(actual).to(device="cpu")
+                actual = actual.squeeze(0)
+                image = F.cvcuda_to_tensor(image).to(device="cpu")
+                image = image.squeeze(0)
 
-    @pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA")
-    @pytest.mark.parametrize("size", [(10, 5), (25, 15), (25, 5), (10, 15), (10, 10)])
-    @pytest.mark.parametrize("seed", list(range(5)))
-    def test_transform_cvcuda_correctness(self, size, seed):
-        pad_if_needed = False
-        if size[0] > self.INPUT_SIZE[0] or size[1] > self.INPUT_SIZE[1]:
-            pad_if_needed = True
-        transform = transforms.RandomCrop(size, pad_if_needed=pad_if_needed)
-
-        image = make_image(size=self.INPUT_SIZE, batch_dims=(1,), device="cuda")
-        cv_image = F.to_cvcuda_tensor(image)
-
-        with freeze_rng_state():
-            torch.manual_seed(seed)
-            actual = transform(cv_image)
-
-            torch.manual_seed(seed)
-            expected = transform(image)
+            expected = F.to_image(transform(F.to_pil_image(image)))
 
-        if not pad_if_needed:
-            torch.testing.assert_close(F.cvcuda_to_tensor(actual), expected, rtol=0, atol=0)
+        if make_input == make_image_cvcuda and will_pad:
+            # when padding is applied, CV-CUDA will always fill with zeros
+            # cannot use assert_equal since it will fail unless random is all zeros
+            torch.testing.assert_close(actual, expected, rtol=0, atol=get_max_value(image.dtype))
         else:
-            # if padding is requied, CV-CUDA will always fill with zeros
-            torch.testing.assert_close(F.cvcuda_to_tensor(actual), expected, rtol=0, atol=get_max_value(image.dtype))
+            assert_equal(actual, expected)
 
     def _reference_crop_bounding_boxes(self, bounding_boxes, *, top, left, height, width):
         affine_matrix = np.array(
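
The loose tolerance in the padded branch deserves a note: per the comment in the diff, CV-CUDA always fills padding with zeros, while the PIL reference fills according to the parametrized padding_mode, so padded pixels can legitimately differ by anything up to the dtype's maximum value. Setting rtol=0 and atol=get_max_value(image.dtype) keeps assert_close's shape and dtype checks but tolerates any per-pixel difference in that case; the unpadded case still uses exact assert_equal. Here is a small self-contained illustration; the fill values and the get_max_value stand-in are made up for the example, not taken from torchvision.

# Demonstrates why atol=get_max_value(dtype) makes the padded-case comparison pass.
import torch


def get_max_value(dtype):
    # stand-in for the test suite's helper of the same name
    return torch.iinfo(dtype).max if not dtype.is_floating_point else 1.0


cvcuda_border = torch.zeros(3, 4, 4, dtype=torch.uint8)           # CV-CUDA pads with zeros
reference_border = torch.full((3, 4, 4), 200, dtype=torch.uint8)  # reference fill for e.g. "edge"/"reflect"

# Passes: with rtol=0 the allowed difference is atol=255, so any pair of uint8 values compares as "close".
torch.testing.assert_close(cvcuda_border, reference_border, rtol=0, atol=get_max_value(torch.uint8))

# An exact comparison would fail here:
# torch.testing.assert_close(cvcuda_border, reference_border, rtol=0, atol=0)

In effect the padded CV-CUDA case only verifies shape and dtype of the result, which is exactly the trade-off the in-diff comment calls out.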
