5 changes: 4 additions & 1 deletion .github/workflows/build-wheels.yml
@@ -99,7 +99,7 @@ jobs:
name: ${{ matrix.name }} (torch v${{ matrix.torch-version }})
strategy:
matrix:
torch-version: ['2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7']
torch-version: ['2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '2.8']
arch: ['arm64', 'x86_64']
os: ['ubuntu-22.04', 'ubuntu-22.04-arm', 'macos-13', 'macos-14', 'windows-2022']
exclude:
@@ -115,6 +115,7 @@ jobs:
- {os: macos-13, arch: x86_64, torch-version: '2.5'}
- {os: macos-13, arch: x86_64, torch-version: '2.6'}
- {os: macos-13, arch: x86_64, torch-version: '2.7'}
- {os: macos-13, arch: x86_64, torch-version: '2.8'}
include:
# add `cibw-arch` and `rust-target` to the different configurations
- name: x86_64 Linux
@@ -149,6 +150,8 @@ jobs:
- {torch-version: '2.4', cibw-python: 'cp312-*'}
- {torch-version: '2.5', cibw-python: 'cp312-*'}
- {torch-version: '2.6', cibw-python: 'cp312-*'}
- {torch-version: '2.7', cibw-python: 'cp312-*'}
- {torch-version: '2.8', cibw-python: 'cp312-*'}
steps:
- uses: actions/checkout@v4
with:
8 changes: 4 additions & 4 deletions .github/workflows/torch-tests.yml
@@ -24,26 +24,26 @@ jobs:
cargo-test-flags: --release

- os: ubuntu-22.04
torch-version: "2.7"
torch-version: "2.8"
python-version: "3.13"
cargo-test-flags: --release
do-valgrind: true

- os: ubuntu-22.04
container: ubuntu:20.04
extra-name: ", cmake 3.16"
torch-version: "2.7"
torch-version: "2.8"
python-version: "3.13"
cargo-test-flags: ""
cxx-flags: -fsanitize=undefined -fsanitize=address -fno-omit-frame-pointer -g

- os: macos-14
torch-version: "2.7"
torch-version: "2.8"
python-version: "3.13"
cargo-test-flags: --release

- os: windows-2022
torch-version: "2.7"
torch-version: "2.8"
python-version: "3.13"
cargo-test-flags: --release

2 changes: 1 addition & 1 deletion docs/featomic-json-schema/Cargo.toml
@@ -14,5 +14,5 @@ test = false

[dependencies]
featomic = {path = "../../featomic"}
schemars = "=1.0.0-alpha.17"
schemars = "1"
serde_json = "1"
4 changes: 2 additions & 2 deletions docs/requirements.txt
@@ -11,8 +11,8 @@ myst-parser # markdown => rst translation, used in extensions/featomic_json_
# dependencies for the tutorials
--extra-index-url https://download.pytorch.org/whl/cpu
metatensor-operations >=0.3.3,<0.4
metatensor-torch >= 0.7.6,<0.8
metatomic-torch >= 0.1.1,<0.2
metatensor-torch >=0.8.0,<0.9
metatomic-torch >= 0.1.4,<0.2
torch
chemfiles
matplotlib
12 changes: 5 additions & 7 deletions featomic-torch/CMakeLists.txt
@@ -122,8 +122,8 @@ find_package(Torch 2.1 REQUIRED)
#
# When updating METATENSOR_TORCH_FETCH_VERSION, you will also have to update the
# SHA256 sum of the file in `FetchContent_Declare`.
set(METATENSOR_TORCH_FETCH_VERSION "0.7.6")
set(REQUIRED_METATENSOR_TORCH_VERSION "0.7")
set(METATENSOR_TORCH_FETCH_VERSION "0.8.0")
set(REQUIRED_METATENSOR_TORCH_VERSION "0.8")
if (FEATOMIC_FETCH_METATENSOR_TORCH)
message(STATUS "Fetching metatensor-torch from github")

@@ -132,7 +132,7 @@ if (FEATOMIC_FETCH_METATENSOR_TORCH)
FetchContent_Declare(
metatensor_torch
URL ${URL_ROOT}/metatensor-torch-v${METATENSOR_TORCH_FETCH_VERSION}/metatensor-torch-cxx-${METATENSOR_TORCH_FETCH_VERSION}.tar.gz
URL_HASH SHA256=8dcc07c86094034facba09ebcc6b52f41847c2413737c8f9c88ae0a2990f8d41
URL_HASH SHA256=61d383ce958deafe0e3916088185527680c9118588722b17ec5c39cfbaa6da55
)

FetchContent_MakeAvailable(metatensor_torch)
@@ -150,19 +150,17 @@ endif()
#
# When updating METATOMIC_TORCH_FETCH_VERSION, you will also have to update the
# SHA256 sum of the file in `FetchContent_Declare`.
set(METATOMIC_TORCH_FETCH_VERSION "0.1.1")
set(METATOMIC_TORCH_FETCH_VERSION "0.1.4")
set(REQUIRED_METATOMIC_TORCH_VERSION "0.1")
if (FEATOMIC_FETCH_METATENSOR_TORCH)
message(STATUS "Fetching metatomic-torch from github")
find_package(Patch REQUIRED)

set(URL_ROOT "https://github.com/metatensor/metatomic/releases/download")
include(FetchContent)
FetchContent_Declare(
metatomic_torch
URL ${URL_ROOT}/metatomic-torch-v${METATOMIC_TORCH_FETCH_VERSION}/metatomic-torch-cxx-${METATOMIC_TORCH_FETCH_VERSION}.tar.gz
URL_HASH SHA256=2dc0a0213b7f5df3ac519516f1b17801baa6973d96b339f0b39cadea310fefe1
PATCH_COMMAND ${Patch_EXECUTABLE} -p2 < ${CMAKE_CURRENT_SOURCE_DIR}/cmake/metatomic-cmake.patch
URL_HASH SHA256=385ec8b8515d674b6a9f093f724792b2469e7ea2365ca596f574b64e38494f94
)

FetchContent_MakeAvailable(metatomic_torch)
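The comments kept in `featomic-torch/CMakeLists.txt` above note that bumping `METATENSOR_TORCH_FETCH_VERSION` or `METATOMIC_TORCH_FETCH_VERSION` also means updating the SHA256 sums passed as `URL_HASH` in the matching `FetchContent_Declare` calls. Below is a minimal helper sketch (not part of this PR) for producing those digests; the metatomic release URL root is the one shown in the hunk above, while the metatensor-torch root and the exact versions are assumptions for illustration.

```python
# Hypothetical helper, not part of this PR: print SHA256 digests to paste into
# URL_HASH after bumping the *_FETCH_VERSION variables in CMakeLists.txt.
import hashlib
import urllib.request

TARBALLS = {
    # URLs follow the ${URL_ROOT}/<name>-v<version>/<name>-cxx-<version>.tar.gz
    # pattern used in the FetchContent_Declare calls above.
    "metatensor-torch": (
        "https://github.com/metatensor/metatensor/releases/download/"
        "metatensor-torch-v0.8.0/metatensor-torch-cxx-0.8.0.tar.gz"
    ),
    "metatomic-torch": (
        "https://github.com/metatensor/metatomic/releases/download/"
        "metatomic-torch-v0.1.4/metatomic-torch-cxx-0.1.4.tar.gz"
    ),
}

for name, url in TARBALLS.items():
    # download the release tarball and hash its full contents
    with urllib.request.urlopen(url) as response:
        digest = hashlib.sha256(response.read()).hexdigest()
    print(f"{name}: URL_HASH SHA256={digest}")
```

Pasting the printed digests into the two `FetchContent_Declare` calls keeps the fetched tarballs pinned to the released archives.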
2 changes: 1 addition & 1 deletion featomic-torch/Cargo.toml
@@ -10,7 +10,7 @@ test = false
doctest = false

[dev-dependencies]
which = "7"
which = "8"

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tarpaulin)'] }
39 changes: 0 additions & 39 deletions featomic-torch/cmake/metatomic-cmake.patch

This file was deleted.

2 changes: 1 addition & 1 deletion featomic-torch/include/featomic/torch/autograd.hpp
@@ -41,7 +41,7 @@ class FEATOMIC_TORCH_EXPORT FeatomicAutograd: public torch::autograd::Function<F
torch::Tensor all_positions,
torch::Tensor all_cells,
torch::IValue systems_start,
metatensor_torch::TorchTensorBlock block
metatensor_torch::TensorBlock block
);

/// Backward step: get the gradients of some quantity `A` w.r.t. the outputs
6 changes: 3 additions & 3 deletions featomic-torch/include/featomic/torch/calculator.hpp
@@ -100,7 +100,7 @@ class FEATOMIC_TORCH_EXPORT CalculatorHolder: public torch::CustomClassHolder {
}

/// Run a calculation for the given `systems` using the given options.
metatensor_torch::TorchTensorMap compute(
metatensor_torch::TensorMap compute(
std::vector<metatomic_torch::System> systems,
TorchCalculatorOptions options = {}
);
@@ -123,9 +123,9 @@ class FEATOMIC_TORCH_EXPORT CalculatorHolder: public torch::CustomClassHolder {
/// contain `"cell"` gradients.
///
/// `forward_gradients` controls which gradients are left inside the TensorMap.
metatensor_torch::TorchTensorMap FEATOMIC_TORCH_EXPORT register_autograd(
metatensor_torch::TensorMap FEATOMIC_TORCH_EXPORT register_autograd(
std::vector<metatomic_torch::System> systems,
metatensor_torch::TorchTensorMap precomputed,
metatensor_torch::TensorMap precomputed,
std::vector<std::string> forward_gradients
);

51 changes: 25 additions & 26 deletions featomic-torch/src/autograd.cpp
@@ -5,7 +5,6 @@

#include "./openmp.hpp"

using namespace metatensor_torch;
using namespace featomic_torch;

// # NOTATION
@@ -29,7 +28,7 @@ struct PositionsGrad: torch::autograd::Function<PositionsGrad<scalar_t>> {
torch::autograd::AutogradContext *ctx,
torch::Tensor all_positions,
torch::Tensor dA_dX,
TorchTensorBlock dX_dr,
metatensor_torch::TensorBlock dX_dr,
torch::IValue systems_start
);

Expand All @@ -46,7 +45,7 @@ struct CellGrad: torch::autograd::Function<CellGrad<scalar_t>> {
torch::autograd::AutogradContext *ctx,
torch::Tensor all_cells,
torch::Tensor dA_dX,
TorchTensorBlock dX_dH,
metatensor_torch::TensorBlock dX_dH,
torch::Tensor systems
);

Expand All @@ -72,16 +71,16 @@ struct CellGrad: torch::autograd::Function<CellGrad<scalar_t>> {
} \
} while (false)

static std::vector<TorchTensorBlock> extract_gradient_blocks(
const TorchTensorMap& tensor,
static std::vector<metatensor_torch::TensorBlock> extract_gradient_blocks(
const metatensor_torch::TensorMap& tensor,
const std::string& parameter
) {
auto gradients = std::vector<TorchTensorBlock>();
auto gradients = std::vector<metatensor_torch::TensorBlock>();
for (int64_t i=0; i<tensor->keys()->count(); i++) {
auto block = TensorMapHolder::block_by_id(tensor, i);
auto gradient = TensorBlockHolder::gradient(block, parameter);
auto block = metatensor_torch::TensorMapHolder::block_by_id(tensor, i);
auto gradient = metatensor_torch::TensorBlockHolder::gradient(block, parameter);

gradients.push_back(torch::make_intrusive<TensorBlockHolder>(
gradients.push_back(torch::make_intrusive<metatensor_torch::TensorBlockHolder>(
gradient->values(),
gradient->samples(),
gradient->components(),
@@ -101,15 +100,15 @@ std::vector<torch::Tensor> FeatomicAutograd::forward(
torch::Tensor all_positions,
torch::Tensor all_cells,
torch::IValue systems_start,
metatensor_torch::TorchTensorBlock block
metatensor_torch::TensorBlock block
) {
ctx->save_for_backward({all_positions, all_cells});

if (all_positions.requires_grad()) {
ctx->saved_data.emplace("systems_start", systems_start);

auto gradient = TensorBlockHolder::gradient(block, "positions");
ctx->saved_data["positions_gradients"] = torch::make_intrusive<TensorBlockHolder>(
auto gradient = metatensor_torch::TensorBlockHolder::gradient(block, "positions");
ctx->saved_data["positions_gradients"] = torch::make_intrusive<metatensor_torch::TensorBlockHolder>(
gradient->values(),
gradient->samples(),
gradient->components(),
@@ -120,8 +119,8 @@ std::vector<torch::Tensor> FeatomicAutograd::forward(
if (all_cells.requires_grad()) {
ctx->saved_data["samples"] = block->samples();

auto gradient = TensorBlockHolder::gradient(block, "cell");
ctx->saved_data["cell_gradients"] = torch::make_intrusive<TensorBlockHolder>(
auto gradient = metatensor_torch::TensorBlockHolder::gradient(block, "cell");
ctx->saved_data["cell_gradients"] = torch::make_intrusive<metatensor_torch::TensorBlockHolder>(
gradient->values(),
gradient->samples(),
gradient->components(),
@@ -151,7 +150,7 @@ std::vector<torch::Tensor> FeatomicAutograd::backward(

// ===================== gradient w.r.t. positions ====================== //
if (all_positions.requires_grad()) {
auto forward_gradient = ctx->saved_data["positions_gradients"].toCustomClass<TensorBlockHolder>();
auto forward_gradient = ctx->saved_data["positions_gradients"].toCustomClass<metatensor_torch::TensorBlockHolder>();
auto systems_start = ctx->saved_data["systems_start"];

if (all_positions.scalar_type() == torch::kFloat32) {
@@ -179,8 +178,8 @@ std::vector<torch::Tensor> FeatomicAutograd::backward(

// ======================= gradient w.r.t. cell ========================= //
if (all_cells.requires_grad()) {
auto forward_gradient = ctx->saved_data["cell_gradients"].toCustomClass<TensorBlockHolder>();
auto block_samples = ctx->saved_data["samples"].toCustomClass<LabelsHolder>();
auto forward_gradient = ctx->saved_data["cell_gradients"].toCustomClass<metatensor_torch::TensorBlockHolder>();
auto block_samples = ctx->saved_data["samples"].toCustomClass<metatensor_torch::LabelsHolder>();

// find the index of the "system" dimension in the samples
const auto& sample_names = block_samples->names();
@@ -238,7 +237,7 @@ std::vector<torch::Tensor> PositionsGrad<scalar_t>::forward(
torch::autograd::AutogradContext *ctx,
torch::Tensor all_positions,
torch::Tensor dA_dX,
TorchTensorBlock dX_dr,
metatensor_torch::TensorBlock dX_dr,
torch::IValue systems_start_ivalue
) {
// ====================== input parameters checks ======================= //
@@ -323,10 +322,10 @@ std::vector<torch::Tensor> PositionsGrad<scalar_t>::backward(
) {
// ====================== input parameters checks ======================= //
auto saved_variables = ctx->get_saved_variables();
auto all_positions = saved_variables[0];
auto dA_dX = saved_variables[1];
const auto& all_positions = saved_variables[0];
const auto& dA_dX = saved_variables[1];

auto dX_dr = ctx->saved_data["positions_gradients"].toCustomClass<TensorBlockHolder>();
auto dX_dr = ctx->saved_data["positions_gradients"].toCustomClass<metatensor_torch::TensorBlockHolder>();
auto systems_start = ctx->saved_data["systems_start"].toIntList();

auto dB_d_dA_dr = grad_outputs[0]; // gradient of B w.r.t. dA/dr (output of forward)
@@ -430,7 +429,7 @@ std::vector<torch::Tensor> CellGrad<scalar_t>::forward(
torch::autograd::AutogradContext *ctx,
torch::Tensor all_cells,
torch::Tensor dA_dX,
TorchTensorBlock dX_dH,
metatensor_torch::TensorBlock dX_dH,
torch::Tensor systems
) {
// ====================== input parameters checks ======================= //
@@ -509,11 +508,11 @@ std::vector<torch::Tensor> CellGrad<scalar_t>::backward(
) {
// ====================== input parameters checks ======================= //
auto saved_variables = ctx->get_saved_variables();
auto all_cells = saved_variables[0];
auto dA_dX = saved_variables[1];
auto systems = saved_variables[2];
const auto& all_cells = saved_variables[0];
const auto& dA_dX = saved_variables[1];
const auto& systems = saved_variables[2];

auto dX_dH = ctx->saved_data["cell_gradients"].toCustomClass<TensorBlockHolder>();
auto dX_dH = ctx->saved_data["cell_gradients"].toCustomClass<metatensor_torch::TensorBlockHolder>();

auto dB_d_dA_dH = grad_outputs[0]; // gradient of B w.r.t. dA/dH (output of forward)
