Changes from all commits
4 changes: 1 addition & 3 deletions .github/workflows/build-wheels.yml
@@ -31,7 +31,7 @@ jobs:
matrix:
os: ['ubuntu-24.04', 'ubuntu-24.04-arm', 'macos-15-intel', 'macos-15', 'windows-2022']
arch: ['arm64', 'x86_64']
torch-version: ['2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '2.8', '2.9', '2.10']
torch-version: ['2.3', '2.4', '2.5', '2.6', '2.7', '2.8', '2.9', '2.10']
exclude:
# remove mismatched arch-os combinations
- {os: macos-15-intel, arch: arm64}
@@ -76,8 +76,6 @@ jobs:
rust-target: x86_64-pc-windows-msvc
cibw-arch: AMD64
# add the right python version image for each torch version
- {torch-version: '2.1', cibw-python: 'cp311-*'}
- {torch-version: '2.2', cibw-python: 'cp312-*'}
- {torch-version: '2.3', cibw-python: 'cp312-*'}
- {torch-version: '2.4', cibw-python: 'cp312-*'}
- {torch-version: '2.5', cibw-python: 'cp312-*'}
2 changes: 1 addition & 1 deletion .github/workflows/torch-tests.yml
@@ -19,7 +19,7 @@ jobs:
include:
- os: ubuntu-24.04
python-version: "3.10"
torch-version: "2.1"
torch-version: "2.3"
numpy-version-pin: "<2.0"
- os: ubuntu-24.04
python-version: "3.10"
4 changes: 2 additions & 2 deletions docs/src/installation.rst
@@ -23,7 +23,7 @@ and use it will depend on the programming language you are using.

We provide pre-compiled wheels on PyPI that are compatible with all the
supported ``torch`` versions at the time of the ``metatomic-torch`` release.
Currently PyTorch version 2.1 and above is supported.
Currently PyTorch version 2.3 and above is supported.

If you want to use the code with an unsupported PyTorch version, or a
new release of PyTorch which did not exist yet when we released
@@ -105,7 +105,7 @@ and use it will depend on the programming language you are using.
metatensor-torch.
- the C++ part of PyTorch, which you can install `on its own
<https://pytorch.org/get-started/locally/>`_. We are compatible with
``libtorch`` version 2.1 or above. You can also use the same library as
``libtorch`` version 2.3 or above. You can also use the same library as
the Python version of torch by adding the output of the command below
to ``CMAKE_PREFIX_PATH``:

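
A note on the CMAKE_PREFIX_PATH instruction quoted above: the command it refers to lies outside the lines shown in this diff. A common way to obtain the CMake prefix path of an installed Python torch (shown here only as an illustration, not taken from this diff) is:

# Sketch, not from this diff: print the CMake prefix path of the installed
# Python torch package; its output can be appended to CMAKE_PREFIX_PATH when
# building the C++ side against libtorch >= 2.3.
import torch

print(torch.utils.cmake_prefix_path)
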
2 changes: 1 addition & 1 deletion metatomic-torch/CMakeLists.txt
@@ -91,7 +91,7 @@ endif()
# fixed version in `cmake/FindCUDNN.cmake`
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake;${CMAKE_MODULE_PATH}")

find_package(Torch 2.1 REQUIRED)
find_package(Torch 2.3 REQUIRED)

set(METATOMIC_TORCH_HEADERS
"include/metatomic/torch/system.hpp"
2 changes: 1 addition & 1 deletion python/metatomic_torch/build-backend/backend.py
@@ -11,7 +11,7 @@
if FORCED_TORCH_VERSION is not None:
TORCH_DEP = f"torch =={FORCED_TORCH_VERSION}"
else:
TORCH_DEP = "torch >=2.1"
TORCH_DEP = "torch >=2.3"

# ==================================================================================== #
# Build backend functions definition #
130 changes: 23 additions & 107 deletions python/metatomic_torch/metatomic/torch/ase_calculator.py
@@ -7,7 +7,7 @@
import metatensor.torch as mts
import numpy as np
import torch
import vesin
import vesin.metatomic
from metatensor.torch import Labels, TensorBlock, TensorMap
from torch.profiler import record_function

@@ -20,7 +20,6 @@
load_atomistic_model,
pick_device,
pick_output,
register_autograd_neighbors,
)


@@ -384,17 +383,6 @@ def run_model(
atoms=atoms, dtype=self._dtype, device=self._device
)
system = System(types, positions, cell, pbc)
# Compute the neighbors lists requested by the model
for options in self._model.requested_neighbor_lists():
neighbors = _compute_ase_neighbors(
atoms, options, dtype=self._dtype, device=self._device
)
register_autograd_neighbors(
system,
neighbors,
check_consistency=self.parameters["check_consistency"],
)
system.add_neighbor_list(options, neighbors)
# Get the additional inputs requested by the model
for name, option in self._model.requested_inputs().items():
input_tensormap = _get_ase_input(
@@ -403,6 +391,14 @@
system.add_data(name, input_tensormap)
systems.append(system)

# Compute the neighbors lists requested by the model
vesin.metatomic.compute_requested_neighbors(
systems=systems,
system_length_unit="angstrom",
model=self._model,
check_consistency=self.parameters["check_consistency"],
)

available_outputs = self._model.capabilities().outputs
for key in outputs:
if key not in available_outputs:
@@ -537,16 +533,12 @@ def calculate(
with record_function("MetatomicCalculator::compute_neighbors"):
# convert from ase.Atoms to metatomic.torch.System
system = System(types, positions, cell, pbc)
for options in self._model.requested_neighbor_lists():
neighbors = _compute_ase_neighbors(
Member: Can you remove the code for _compute_ase_neighbors?

Contributor Author: My bad, should be done now. For now, I'm calling it with [system] as an argument within the loop (same as the old code), but we could also call it once all systems are created (and therefore pass systems as the argument).

Member: If you want to also include this change, it works for me!

atoms, options, dtype=self._dtype, device=self._device
)
register_autograd_neighbors(
system,
neighbors,
check_consistency=self.parameters["check_consistency"],
)
system.add_neighbor_list(options, neighbors)
vesin.metatomic.compute_requested_neighbors(
systems=[system],
system_length_unit="angstrom",
model=self._model,
check_consistency=self.parameters["check_consistency"],
)

with record_function("MetatomicCalculator::get_model_inputs"):
for name, option in self._model.requested_inputs().items():
@@ -726,19 +718,16 @@ def compute_energy(
cell = cell @ strain
strains.append(strain)
system = System(types, positions, cell, pbc)
# Compute the neighbors lists requested by the model
for options in self._model.requested_neighbor_lists():
neighbors = _compute_ase_neighbors(
atoms, options, dtype=self._dtype, device=self._device
)
register_autograd_neighbors(
system,
neighbors,
check_consistency=self.parameters["check_consistency"],
)
system.add_neighbor_list(options, neighbors)
systems.append(system)

# Compute the neighbors lists requested by the model
vesin.metatomic.compute_requested_neighbors(
systems=systems,
system_length_unit="angstrom",
model=self._model,
check_consistency=self.parameters["check_consistency"],
)

options = ModelEvaluationOptions(
length_unit="angstrom",
outputs=outputs,
@@ -901,79 +890,6 @@ def _ase_properties_to_metatensor_outputs(
return metatensor_outputs


def _compute_ase_neighbors(atoms, options, dtype, device):
# options.strict is ignored by this function, since `ase.neighborlist.neighbor_list`
# only computes strict NL, and these are valid even with `strict=False`

if np.all(atoms.pbc) or np.all(~atoms.pbc):
nl_i, nl_j, nl_S, nl_D = vesin.ase_neighbor_list(
"ijSD",
atoms,
cutoff=options.engine_cutoff(engine_length_unit="angstrom"),
)
else:
nl_i, nl_j, nl_S, nl_D = ase.neighborlist.neighbor_list(
"ijSD",
atoms,
cutoff=options.engine_cutoff(engine_length_unit="angstrom"),
)

if not options.full_list:
# The pair selection code here below avoids a relatively slow loop over
# all pairs to improve performance
reject_condition = (
# we want a half neighbor list, so drop all duplicated neighbors
(nl_j < nl_i)
| (
(nl_i == nl_j)
& (
# only create pairs with the same atom twice if the pair spans more
# than one unit cell
((nl_S[:, 0] == 0) & (nl_S[:, 1] == 0) & (nl_S[:, 2] == 0))
# When creating pairs between an atom and one of its periodic
# images, the code generates multiple redundant pairs
# (e.g. with shifts 0 1 1 and 0 -1 -1); and we want to only keep one
# of these. We keep the pair in the positive half plane of shifts.
| (
(nl_S.sum(axis=1) < 0)
| (
(nl_S.sum(axis=1) == 0)
& (
(nl_S[:, 2] < 0)
| ((nl_S[:, 2] == 0) & (nl_S[:, 1] < 0))
)
)
)
)
)
)
selected = np.logical_not(reject_condition)
nl_i = nl_i[selected]
nl_j = nl_j[selected]
nl_S = nl_S[selected]
nl_D = nl_D[selected]

samples = np.concatenate([nl_i[:, None], nl_j[:, None], nl_S], axis=1)
distances = torch.from_numpy(nl_D).to(dtype=dtype, device=device)

return TensorBlock(
values=distances.reshape(-1, 3, 1),
samples=Labels(
names=[
"first_atom",
"second_atom",
"cell_shift_a",
"cell_shift_b",
"cell_shift_c",
],
values=torch.from_numpy(samples).to(dtype=torch.int32, device=device),
assume_unique=True,
),
components=[Labels.range("xyz", 3).to(device)],
properties=Labels.range("distance", 1).to(device),
)


def _get_ase_input(
atoms: ase.Atoms,
name: str,
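
For context on the change discussed in the review thread above, here is a minimal sketch of the pattern this file now follows, using only the call signature visible in the diff: build the metatomic Systems first, then compute every neighbor list the model requests in a single batched vesin.metatomic call. The model path, the toy system values, and the exact imports are assumptions for illustration, not values from the repository.

# Minimal sketch of the batched neighbor-list computation; "model_path.pt"
# and the two-atom system below are hypothetical.
import torch
import vesin.metatomic
from metatomic.torch import System, load_atomistic_model  # assuming these top-level exports

# A single fully periodic cubic cell with two atoms.
types = torch.tensor([1, 1], dtype=torch.int32)
positions = torch.zeros((2, 3), dtype=torch.float64)
positions[1, 0] = 1.5
cell = 10.0 * torch.eye(3, dtype=torch.float64)
pbc = torch.tensor([True, True, True])
systems = [System(types, positions, cell, pbc)]

model = load_atomistic_model("model_path.pt")  # hypothetical path

# One call computes all neighbor lists requested by the model, for all systems.
vesin.metatomic.compute_requested_neighbors(
    systems=systems,
    system_length_unit="angstrom",
    model=model,
    check_consistency=False,  # the calculator forwards its own setting here
)
# After this call each System carries the neighbor lists declared by
# model.requested_neighbor_lists(); autograd registration is presumably
# handled inside compute_requested_neighbors, which is why the explicit
# register_autograd_neighbors call could be dropped from the calculator.
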
2 changes: 1 addition & 1 deletion python/metatomic_torch/setup.py
@@ -312,7 +312,7 @@ def create_version_number(version):
torch_version = f"== {torch_v_major}.{torch_v_minor}.*"
except ImportError:
# otherwise we are building a sdist
torch_version = ">= 2.1"
torch_version = ">= 2.3"

install_requires = [
f"torch {torch_version}",