|
28 | 28 | from cuda.core.experimental._utils.cuda_utils import handle_return |
29 | 29 | from cuda.core.experimental.utils import StridedMemoryView |
30 | 30 |
|
31 | | -from cuda_python_test_helpers import IS_WSL, supports_ipc_mempool |
| 31 | +from cuda_python_test_helpers import supports_ipc_mempool |
32 | 32 |
|
33 | 33 | POOL_SIZE = 2097152 # 2MB size |
34 | 34 |
|
@@ -322,13 +322,13 @@ def test_vmm_allocator_basic_allocation(): |
322 | 322 | This test verifies that VirtualMemoryResource can allocate memory |
323 | 323 | using CUDA VMM APIs with default configuration. |
324 | 324 | """ |
325 | | - if platform.system() == "Windows": |
326 | | - pytest.skip("VirtualMemoryResource is not supported on Windows TCC") |
327 | | - if IS_WSL: |
328 | | - pytest.skip("VirtualMemoryResource is not supported on WSL") |
329 | | - |
330 | 325 | device = Device() |
331 | 326 | device.set_current() |
| 327 | + |
| 328 | + # Skip if virtual memory management is not supported |
| 329 | + if not device.properties.virtual_memory_management_supported: |
| 330 | + pytest.skip("Virtual memory management is not supported on this device") |
| 331 | + |
332 | 332 | options = VirtualMemoryResourceOptions() |
333 | 333 | # Create VMM allocator with default config |
334 | 334 | vmm_mr = VirtualMemoryResource(device, config=options) |
@@ -361,13 +361,17 @@ def test_vmm_allocator_policy_configuration(): |
361 | 361 | with different allocation policies and that the configuration affects |
362 | 362 | the allocation behavior. |
363 | 363 | """ |
364 | | - if platform.system() == "Windows": |
365 | | - pytest.skip("VirtualMemoryResource is not supported on Windows TCC") |
366 | | - if IS_WSL: |
367 | | - pytest.skip("VirtualMemoryResource is not supported on WSL") |
368 | 364 | device = Device() |
369 | 365 | device.set_current() |
370 | 366 |
|
| 367 | + # Skip if virtual memory management is not supported |
| 368 | + if not device.properties.virtual_memory_management_supported: |
| 369 | + pytest.skip("Virtual memory management is not supported on this device") |
| 370 | + |
| 367 | + # Skip if GPU Direct RDMA is not supported |
| 368 | + if not device.properties.gpu_direct_rdma_supported: |
| 369 | + pytest.skip("GPU Direct RDMA is not supported on this device") |
| 374 | + |
371 | 375 | # Test with custom VMM config |
372 | 376 | custom_config = VirtualMemoryResourceOptions( |
373 | 377 | allocation_type="pinned", |
@@ -420,13 +424,13 @@ def test_vmm_allocator_grow_allocation(): |
420 | 424 | This test verifies that VirtualMemoryResource can grow existing |
421 | 425 | allocations while preserving the base pointer when possible. |
422 | 426 | """ |
423 | | - if platform.system() == "Windows": |
424 | | - pytest.skip("VirtualMemoryResource is not supported on Windows TCC") |
425 | | - if IS_WSL: |
426 | | - pytest.skip("VirtualMemoryResource is not supported on WSL") |
427 | 427 | device = Device() |
428 | 428 | device.set_current() |
429 | 429 |
|
| 430 | + # Skip if virtual memory management is not supported (required by VirtualMemoryResource) |
| 431 | + if not device.properties.virtual_memory_management_supported: |
| 432 | + pytest.skip("Virtual memory management is not supported on this device") |
| 433 | + |
430 | 434 | options = VirtualMemoryResourceOptions() |
431 | 435 |
|
432 | 436 | vmm_mr = VirtualMemoryResource(device, config=options) |
@@ -458,6 +462,29 @@ def test_vmm_allocator_grow_allocation(): |
458 | 462 | grown_buffer.close() |
459 | 463 |
|
460 | 464 |
|
| 465 | +def test_vmm_allocator_rdma_unsupported_exception(): |
| 466 | + """Test that VirtualMemoryResource raises an exception when RDMA is requested but the device doesn't support it. |
| 467 | +
|
| 468 | + This test verifies that the VirtualMemoryResource constructor raises a RuntimeError |
| 469 | + when gpu_direct_rdma=True is requested but the device doesn't support GPU Direct RDMA. |
| 470 | + """ |
| 471 | + device = Device() |
| 472 | + device.set_current() |
| 473 | + |
| 474 | + # Skip if virtual memory management is not supported (required by VirtualMemoryResource) |
| 475 | + if not device.properties.virtual_memory_management_supported: |
| 476 | + pytest.skip("Virtual memory management is not supported on this device") |
| 477 | + |
| 478 | + # Skip if GPU Direct RDMA is supported (we want to test the unsupported case) |
| 479 | + if device.properties.gpu_direct_rdma_supported: |
| 480 | + pytest.skip("This test requires a device that doesn't support GPU Direct RDMA") |
| 481 | + |
| 482 | + # Test that requesting RDMA on an unsupported device raises an exception |
| 483 | + options = VirtualMemoryResourceOptions(gpu_direct_rdma=True) |
| 484 | + with pytest.raises(RuntimeError, match="GPU Direct RDMA is not supported on this device"): |
| 485 | + VirtualMemoryResource(device, config=options) |
| 486 | + |
| 487 | + |
461 | 488 | def test_mempool(mempool_device): |
462 | 489 | device = mempool_device |
463 | 490 |
|
|