diff --git a/python/featomic/featomic/clebsch_gordan/_coefficients.py b/python/featomic/featomic/clebsch_gordan/_coefficients.py
index b3dd47187..85d0c3b85 100644
--- a/python/featomic/featomic/clebsch_gordan/_coefficients.py
+++ b/python/featomic/featomic/clebsch_gordan/_coefficients.py
@@ -274,7 +274,10 @@ def _cg_coeff_dict_to_tensormap_dense(
         blocks.append(
             TensorBlock(
                 values=_dispatch.contiguous(l1l2lam_values.reshape(block_value_shape)),
-                samples=Labels.range("_", 1),
+                samples=Labels(
+                    ["_"],
+                    _dispatch.int_array_like([0], labels_values_like).reshape(-1, 1),
+                ),
                 components=[
                     Labels(
                         ["m1"],
@@ -295,7 +298,10 @@
                         ).reshape(-1, 1),
                     ),
                 ],
-                properties=Labels.range("cg_coefficient", 1),
+                properties=Labels(
+                    ["cg_coefficient"],
+                    _dispatch.int_array_like([0], labels_values_like).reshape(-1, 1),
+                ),
             )
         )
 
@@ -351,7 +357,10 @@ def _cg_coeff_dict_to_tensormap_sparse(
                 values=_dispatch.contiguous(values),
                 samples=Labels(["m1", "m2", "mu"], l1l2lam_sample_values),
                 components=[],
-                properties=Labels.range("cg_coefficient", 1),
+                properties=Labels(
+                    ["cg_coefficient"],
+                    _dispatch.int_array_like([0], labels_values_like).reshape(-1, 1),
+                ),
             )
         )
 
diff --git a/python/featomic/featomic/clebsch_gordan/_dispatch.py b/python/featomic/featomic/clebsch_gordan/_dispatch.py
index 43d9a8d01..2bc1e8d7d 100644
--- a/python/featomic/featomic/clebsch_gordan/_dispatch.py
+++ b/python/featomic/featomic/clebsch_gordan/_dispatch.py
@@ -279,46 +279,48 @@ def int_range_like(min_val: int, max_val: int, like):
     array dtype and device.
     """
     if isinstance(like, TorchTensor):
-        return torch.arange(min_val, max_val, dtype=torch.int64, device=like.device)
+        return torch.arange(min_val, max_val, dtype=like.dtype, device=like.device)
     elif isinstance(like, np.ndarray):
-        return np.arange(min_val, max_val).astype(np.int64)
+        return np.arange(min_val, max_val).astype(like.dtype)
     else:
         raise TypeError(UNKNOWN_ARRAY_TYPE)
 
 
 def int_array_like(int_list: Union[List[int], List[List[int]]], like):
     """
-    Converts the input list of int to a numpy array or torch tensor
-    based on the type of `like`.
+    Converts the input list of int to a numpy array or torch tensor based on the type of
+    ``like``. The resulting array/tensor will have the same dtype and device as
+    ``like``.
     """
     if isinstance(like, TorchTensor):
         if torch.jit.isinstance(int_list, List[int]):
-            return torch.tensor(int_list, dtype=torch.int64, device=like.device)
+            return torch.tensor(int_list, dtype=like.dtype, device=like.device)
         else:
-            return torch.tensor(int_list, dtype=torch.int64, device=like.device)
+            return torch.tensor(int_list, dtype=like.dtype, device=like.device)
     elif isinstance(like, np.ndarray):
-        return np.array(int_list).astype(np.int64)
+        return np.array(int_list).astype(like.dtype)
     else:
         raise TypeError(UNKNOWN_ARRAY_TYPE)
 
 
 def real_array_like(float_list: List[float], like):
     """
-    Converts the input list of float to a numpy array or torch tensor
-    based on the array type of `like`.
+    Converts the input list of float to a numpy array or torch tensor based on the array
+    type of ``like``. The resulting array/tensor will have the same dtype and device as
+    ``like``.
     """
     if isinstance(like, TorchTensor):
-        return torch.tensor(float_list, dtype=torch.float64, device=like.device)
+        return torch.tensor(float_list, dtype=like.dtype, device=like.device)
     elif isinstance(like, np.ndarray):
-        return np.array(float_list).astype(np.float64)
+        return np.array(float_list).astype(like.dtype)
     else:
         raise TypeError(UNKNOWN_ARRAY_TYPE)
 
 
 def bool_array_like(bool_list: List[bool], like):
     """
-    Converts the input list of bool to a numpy array or torch tensor
-    based on the type of `like`.
+    Converts the input list of bool to a numpy array or torch tensor based on the type
+    of ``like``. The resulting array/tensor will have the same device as ``like``.
     """
     if isinstance(like, TorchTensor):
         return torch.tensor(bool_list, dtype=torch.bool, device=like.device)
diff --git a/python/featomic_torch/tests/clebsch_gordan/density_correlations.py b/python/featomic_torch/tests/clebsch_gordan/density_correlations.py
index 4a133fbb8..37059f731 100644
--- a/python/featomic_torch/tests/clebsch_gordan/density_correlations.py
+++ b/python/featomic_torch/tests/clebsch_gordan/density_correlations.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 import io
+import os
 
 import metatensor.torch
 import pytest
@@ -90,3 +91,43 @@ def test_jit_save_load(cg_backend: str):
     torch.jit.save(calculator, buffer)
     buffer.seek(0)
     torch.jit.load(buffer)
+
+
+def can_use_mps_backend():
+    import torch
+
+    return (
+        # Github Actions M1 runners don't have a GPU accessible
+        os.environ.get("GITHUB_ACTIONS") is None
+        and hasattr(torch.backends, "mps")
+        and torch.backends.mps.is_built()
+        and torch.backends.mps.is_available()
+    )
+
+
+@pytest.mark.parametrize("cg_backend", ["python-dense", "python-sparse"])
+def test_dtype_device(cg_backend):
+    dtype_device = [
+        (torch.float32, "cpu"),
+        (torch.float64, "cpu"),
+    ]
+
+    if can_use_mps_backend():
+        dtype_device.append((torch.float32, "mps"))
+
+    if torch.cuda.is_available():
+        dtype_device.append((torch.float32, "cuda"))
+        dtype_device.append((torch.float64, "cuda"))
+
+    for dtype, device in dtype_device:
+        nu_1 = spherical_expansion().to(dtype).to(device)
+
+        density_correlations = DensityCorrelations(
+            n_correlations=1,
+            max_angular=3,
+            cg_backend=cg_backend,
+            dtype=dtype,
+            device=device,
+        )
+
+        density_correlations.compute(nu_1, angular_cutoff=2)