We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 5111111 · commit b9416e9 · Copy full SHA for b9416e9
test/hqq/test_hqq_affine.py
@@ -15,11 +15,13 @@
15
quantize_,
16
)
17
from torchao.testing.utils import skip_if_rocm
18
+from torchao.utils import get_current_accelerator_device
19
-cuda_available = torch.cuda.is_available()
20
+cuda_available = torch.accelerator.is_available()
21
+_DEVICE = get_current_accelerator_device()
22
23
# Parameters
-device = "cuda:0"
24
+device = f"{_DEVICE}:0"
25
compute_dtype = torch.bfloat16
26
group_size = 64
27
mapping_type = MappingType.ASYMMETRIC
0 commit comments