133 changes: 133 additions & 0 deletions CMakeLists.txt
@@ -90,6 +90,7 @@ option(ESPRESSO_BUILD_TESTS "Enable tests" ON)
option(ESPRESSO_BUILD_WITH_SCAFACOS "Build with ScaFaCoS support" OFF)
option(ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS "Build with Stokesian Dynamics"
OFF)
option(ESPRESSO_BUILD_WITH_METATENSOR "Build with Metatensor support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA
"Build with waLBerla lattice-Boltzmann support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA_AVX
@@ -923,6 +924,138 @@ if(ESPRESSO_BUILD_BENCHMARKS)
add_subdirectory(maintainer/benchmarks)
endif()

#
# Metatensor
#

if(ESPRESSO_BUILD_WITH_METATENSOR)
if (BUILD_OMP AND APPLE)
message(FATAL_ERROR
"Cannot enable both BUILD_OMP and ESPRESSO_BUILD_WITH_METATENSOR on Apple "
"systems, since this results in two different versions of the OpenMP "
"library (one from the system and one from Torch) being linked into the "
"final executable, which then crashes."
)
endif()

# Bring the `torch` target into scope so that the CMake generator
# expressions from `metatensor_torch` can be evaluated
find_package(Torch REQUIRED)

# cmake-format: off
# set(METATENSOR_URL_BASE "https://github.com/lab-cosmo/metatensor/releases/download")
# set(METATENSOR_CORE_VERSION "0.1.8")
# set(METATENSOR_TORCH_VERSION "0.5.3")
#
# include(FetchContent)
# set(BUILD_SHARED_LIBS on CACHE BOOL "")
# FetchContent_Declare(metatensor
#   URL "${METATENSOR_URL_BASE}/metatensor-core-v${METATENSOR_CORE_VERSION}/metatensor-core-cxx-${METATENSOR_CORE_VERSION}.tar.gz"
#   URL_HASH SHA1=3ed389770e5ec6dbb8cbc9ed88f84d6809b552ef
# )
#
# # workaround for https://gitlab.kitware.com/cmake/cmake/-/issues/21146
# if(NOT DEFINED metatensor_SOURCE_DIR OR NOT EXISTS "${metatensor_SOURCE_DIR}")
#   message(STATUS "Fetching metatensor v${METATENSOR_CORE_VERSION} from github")
#   FetchContent_Populate(metatensor)
# endif()
#
# FetchContent_Declare(metatensor_torch
#   URL "${METATENSOR_URL_BASE}/metatensor-torch-v${METATENSOR_TORCH_VERSION}/metatensor-torch-cxx-${METATENSOR_TORCH_VERSION}.tar.gz"
# )
# if(NOT DEFINED metatensor_torch_SOURCE_DIR OR NOT EXISTS "${metatensor_torch_SOURCE_DIR}")
#   message(STATUS "Fetching metatensor torch v${METATENSOR_TORCH_VERSION} from github")
#   FetchContent_Populate(metatensor_torch)
# endif()
#
# set(METATENSOR_INSTALL_BOTH_STATIC_SHARED on CACHE BOOL "")
# add_subdirectory("${metatensor_SOURCE_DIR}")
# add_subdirectory("${metatensor_torch_SOURCE_DIR}")
# cmake-format: on

# The caffe2::mkl target contains MKL_INCLUDE_DIR in its
# INTERFACE_INCLUDE_DIRECTORIES even if MKL was not found, causing a build
# failure with "Imported target "torch" includes non-existent path" further
# down the line. The commented-out code below removes the missing path from
# INTERFACE_INCLUDE_DIRECTORIES, allowing the build to continue.
# if (TARGET caffe2::mkl)
# get_target_property(CAFFE2_MKL_INCLUDE_DIRECTORIES caffe2::mkl INTERFACE_INCLUDE_DIRECTORIES)
# set(MKL_INCLUDE_DIR_NOTFOUND "")
# foreach(_include_dir_ ${CAFFE2_MKL_INCLUDE_DIRECTORIES})
# if ("${_include_dir_}" MATCHES "MKL_INCLUDE_DIR-NOTFOUND")
# set(MKL_INCLUDE_DIR_NOTFOUND "${_include_dir_}")
# endif()
# endforeach()
#
# if (NOT "${MKL_INCLUDE_DIR_NOTFOUND}" STREQUAL "")
# list(REMOVE_ITEM CAFFE2_MKL_INCLUDE_DIRECTORIES "${MKL_INCLUDE_DIR_NOTFOUND}")
# endif()
# set_target_properties(caffe2::mkl PROPERTIES
# INTERFACE_INCLUDE_DIRECTORIES "${CAFFE2_MKL_INCLUDE_DIRECTORIES}"
# )
# endif()

set(METATENSOR_CORE_VERSION "0.1.17")
set(METATENSOR_CORE_SHA256 "42119e11908239915ccc187d7ca65449b461f1d4b5af4d6df1fb613d687da76a")

set(METATENSOR_TORCH_VERSION "0.8.1")
set(METATENSOR_TORCH_SHA256 "9da124e8e09dc1859700723a76ff29aef7a216b84a19d38746cc45bf45bc599b")

set(METATOMIC_TORCH_VERSION "0.1.5")
set(METATOMIC_TORCH_SHA256 "8ecd1587797fe1cf6b2162ddc10cc84c558fdfd55ab225bc5de4fe15ace8fc3d")

set(DOWNLOAD_METATENSOR_DEFAULT ON)
find_package(metatensor_torch ${METATENSOR_TORCH_VERSION} QUIET)
if (metatensor_torch_FOUND)
set(DOWNLOAD_METATENSOR_DEFAULT OFF)
endif()

set(DOWNLOAD_METATOMIC_DEFAULT ON)
find_package(metatomic_torch ${METATOMIC_TORCH_VERSION} QUIET)
if (metatomic_torch_FOUND)
set(DOWNLOAD_METATOMIC_DEFAULT OFF)
endif()

option(DOWNLOAD_METATENSOR "Download metatensor package instead of using an already installed one" ${DOWNLOAD_METATENSOR_DEFAULT})
option(DOWNLOAD_METATOMIC "Download metatomic package instead of using an already installed one" ${DOWNLOAD_METATOMIC_DEFAULT})
if (DOWNLOAD_METATENSOR)
include(FetchContent)

set(URL_BASE "https://github.com/metatensor/metatensor/releases/download")
FetchContent_Declare(metatensor
URL ${URL_BASE}/metatensor-core-v${METATENSOR_CORE_VERSION}/metatensor-core-cxx-${METATENSOR_CORE_VERSION}.tar.gz
URL_HASH SHA256=${METATENSOR_CORE_SHA256}
)

message(STATUS "Fetching metatensor v${METATENSOR_CORE_VERSION} from github")
FetchContent_MakeAvailable(metatensor)

FetchContent_Declare(metatensor-torch
URL ${URL_BASE}/metatensor-torch-v${METATENSOR_TORCH_VERSION}/metatensor-torch-cxx-${METATENSOR_TORCH_VERSION}.tar.gz
URL_HASH SHA256=${METATENSOR_TORCH_SHA256}
)

message(STATUS "Fetching metatensor-torch v${METATENSOR_TORCH_VERSION} from github")
FetchContent_MakeAvailable(metatensor-torch)
else()
# make sure to fail the configuration if CMake cannot find metatensor-torch
find_package(metatensor_torch ${METATENSOR_TORCH_VERSION} REQUIRED)
endif()

if (DOWNLOAD_METATOMIC)
include(FetchContent)

set(URL_BASE "https://github.com/metatensor/metatomic/releases/download")
FetchContent_Declare(metatomic-torch
URL ${URL_BASE}/metatomic-torch-v${METATOMIC_TORCH_VERSION}/metatomic-torch-cxx-${METATOMIC_TORCH_VERSION}.tar.gz
URL_HASH SHA256=${METATOMIC_TORCH_SHA256}
)

message(STATUS "Fetching metatomic-torch v${METATOMIC_TORCH_VERSION} from github")
FetchContent_MakeAvailable(metatomic-torch)
else()
# make sure to fail the configuration if CMake cannot find metatomic-torch
find_package(metatomic_torch ${METATOMIC_TORCH_VERSION} REQUIRED)
endif()
endif()

#
# waLBerla
#
32 changes: 32 additions & 0 deletions build_script.sh
@@ -0,0 +1,32 @@
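#!/bin/bash
# Developer convenience script: the CUDA toolkit location, source/build
# directories and Python virtualenv below are machine-specific and need to
# be adapted.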
export PATH=/usr/local/cuda-12.8/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-12.8/lib64:$LD_LIBRARY_PATH
export CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.8

ESPRESSO_DIR=/tikhome/weeber/es
BUILD_DIR=/tikhome/weeber/es/build_mt
VENV_DIR=/work/weeber/es_mt-env

rm -rf ${BUILD_DIR}
mkdir -p ${BUILD_DIR}
cd ${BUILD_DIR}

source ${VENV_DIR}/bin/activate

export TORCH_PREFIX=$(python -c "import torch; print(torch.utils.cmake_prefix_path)")
export MTS_PREFIX=$(python -c "import metatensor; print(metatensor.utils.cmake_prefix_path)")
export MTS_TORCH_PREFIX=$(python -c "import metatensor.torch; print(metatensor.torch.utils.cmake_prefix_path)")
export MTA_TORCH_PREFIX=$(python -c "import metatomic.torch; print(metatomic.torch.utils.cmake_prefix_path)")
export CMAKE_PREFIX_PATH="$TORCH_PREFIX;$MTS_PREFIX;$MTS_TORCH_PREFIX;$MTA_TORCH_PREFIX"

cd ${BUILD_DIR}
cmake ../ \
-D CMAKE_BUILD_TYPE=Debug \
-D ESPRESSO_BUILD_WITH_CUDA=OFF \
-D ESPRESSO_BUILD_WITH_CCACHE=OFF \
-D ESPRESSO_BUILD_WITH_WALBERLA=OFF \
-D ESPRESSO_BUILD_WITH_WALBERLA_AVX=OFF \
-D ESPRESSO_BUILD_WITH_GSL=OFF \
-D ESPRESSO_BUILD_WITH_METATENSOR=ON \
-D CMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH"

make -j$(nproc)
2 changes: 2 additions & 0 deletions cmake/espresso_cmake_config.cmakein
@@ -13,6 +13,8 @@

#cmakedefine ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS

#cmakedefine ESPRESSO_BUILD_WITH_METATENSOR

#cmakedefine ESPRESSO_BUILD_WITH_WALBERLA

#cmakedefine ESPRESSO_BUILD_WITH_VALGRIND
Binary file added lennard-jones.pt
1 change: 1 addition & 0 deletions src/config/features.def
@@ -112,6 +112,7 @@ HDF5 external
SCAFACOS external
GSL external
STOKESIAN_DYNAMICS external
METATENSOR external
WALBERLA external
VALGRIND external
CALIPER external
7 changes: 7 additions & 0 deletions src/core/CMakeLists.txt
@@ -74,6 +74,12 @@ if(ESPRESSO_BUILD_WITH_CUDA)
"${CABANA_CUDA_CLANG_TIDY}")
endif()
endif()
if(ESPRESSO_BUILD_WITH_METATENSOR)
target_link_libraries(espresso_core PUBLIC "${TORCH_LIBRARIES}")
target_link_libraries(espresso_core PUBLIC metatensor::shared)
target_link_libraries(espresso_core PUBLIC metatomic_torch)
target_link_libraries(espresso_core PUBLIC metatensor_torch)
endif()

install(TARGETS espresso_core
LIBRARY DESTINATION ${ESPRESSO_INSTALL_PYTHON}/espressomd)
@@ -115,6 +121,7 @@ add_subdirectory(immersed_boundary)
add_subdirectory(integrators)
add_subdirectory(io)
add_subdirectory(lb)
add_subdirectory(magnetostatics)
add_subdirectory(ml_metatensor)
add_subdirectory(nonbonded_interactions)
add_subdirectory(object-in-fluid)
23 changes: 23 additions & 0 deletions src/core/ml_metatensor/CMakeLists.txt
@@ -0,0 +1,23 @@
#
# Copyright (C) 2018-2022 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

if(ESPRESSO_BUILD_WITH_METATENSOR)
target_sources(espresso_core PRIVATE model.cpp stub.cpp)
# target_sources(espresso_core PRIVATE load_model.cpp)
endif()
71 changes: 71 additions & 0 deletions src/core/ml_metatensor/add_neighbor_list.hpp
@@ -0,0 +1,71 @@
#include "metatensor/torch/atomistic/system.hpp"
#include "utils/Vector.hpp"
#include <variant>

// A single particle pair: the ids of the two partners and their distance
// vector.
struct PairInfo {
int part_id_1;
int part_id_2;
Utils::Vector3d distance;
};

using Sample = std::array<int32_t, 5>;
using Distances = std::variant<std::vector<std::array<double, 3>>,
std::vector<std::array<float, 3>>>;

template <typename PairIterable>
metatensor_torch::TorchTensorBlock
neighbor_list_from_pairs(const metatensor_torch::System &system,
const PairIterable &pairs) {
auto dtype = system->positions().scalar_type();
auto device = system->positions().device();
std::vector<Sample> samples;
Distances distances;

if (dtype == torch::kFloat64) {
distances = {std::vector<std::array<double, 3>>()};
} else if (dtype == torch::kFloat32) {
distances = {std::vector<std::array<float, 3>>()};
} else {
throw std::runtime_error("Unsupported floating point data type");
}

  for (auto const &pair : pairs) {
    // cell shifts are set to zero: the distance vectors are assumed to
    // already be minimum-image vectors
    samples.push_back({static_cast<int32_t>(pair.part_id_1),
                       static_cast<int32_t>(pair.part_id_2), 0, 0, 0});
    std::visit(
        [&pair](auto &vec) {
          using value_t = typename std::remove_reference_t<
              decltype(vec)>::value_type::value_type;
          vec.push_back({static_cast<value_t>(pair.distance[0]),
                         static_cast<value_t>(pair.distance[1]),
                         static_cast<value_t>(pair.distance[2])});
        },
        distances);
  }

auto n_pairs = static_cast<int64_t>(samples.size());

  // clone so that the tensor owns its data: `samples` is a local buffer
  auto samples_tensor =
      torch::from_blob(
          samples.data(), {n_pairs, 5},
          torch::TensorOptions().dtype(torch::kInt32).device(torch::kCPU))
          .clone();

auto samples_ptr = torch::make_intrusive<metatensor_torch::LabelsHolder>(
std::vector<std::string>{"first_atom", "second_atom", "cell_shift_a",
"cell_shift_b", "cell_shift_c"},
      samples_tensor);

  // likewise clone the distance vectors out of the local buffer
  auto distances_vectors =
      torch::from_blob(
          std::visit([](auto &vec) { return static_cast<void *>(vec.data()); },
                     distances),
          {n_pairs, 3, 1},
          torch::TensorOptions().dtype(dtype).device(torch::kCPU))
          .clone();

auto neighbors = torch::make_intrusive<metatensor_torch::TensorBlockHolder>(
distances_vectors.to(dtype).to(device), samples_ptr->to(device),
std::vector<metatensor_torch::TorchLabels>{
metatensor_torch::LabelsHolder::create({"xyz"}, {{0}, {1}, {2}})
->to(device),
},
metatensor_torch::LabelsHolder::create({"distance"}, {{0}})->to(device));

return neighbors;
}

inline void add_neighbor_list_to_system(
metatensor_torch::System &system,
const metatensor_torch::TorchTensorBlock &neighbors,
const metatensor_torch::NeighborListOptions &options,
bool check_consistency) {
metatensor_torch::register_autograd_neighbors(system, neighbors,
check_consistency);
system->add_neighbor_list(options, neighbors);
}
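
For orientation, a minimal usage sketch of the two helpers above (not part of the diff): it assumes the caller already has a metatensor_torch::System and the NeighborListOptions requested by the model; the include path and function name are illustrative only.

// Hypothetical call site, for illustration only.
#include "ml_metatensor/add_neighbor_list.hpp"

void attach_single_pair(metatensor_torch::System &system,
                        const metatensor_torch::NeighborListOptions &options) {
  // a single pair: particles 0 and 1, separated by 1 along z
  std::vector<PairInfo> pairs;
  pairs.push_back({0, 1, Utils::Vector3d{0.0, 0.0, 1.0}});

  // build the metatensor block holding the distance vectors and attach it to
  // the system, registering autograd so forces can flow back through it
  auto const neighbors = neighbor_list_from_pairs(system, pairs);
  add_neighbor_list_to_system(system, neighbors, options,
                              /*check_consistency=*/true);
}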
83 changes: 83 additions & 0 deletions src/core/ml_metatensor/compute.hpp
@@ -0,0 +1,83 @@
#include "metatensor/torch/atomistic/system.hpp"
#include <metatensor/torch/atomistic/model.hpp>
#include <metatensor/torch/tensor.hpp>

// NOTE: the exported model is assumed to be passed in by the caller as a
// TorchScript module.
inline metatensor_torch::TorchTensorMap
run_model(torch::jit::Module &model, metatensor_torch::System &system,
          int64_t n_particles,
          const metatensor_torch::ModelEvaluationOptions &evaluation_options,
          torch::Device device, bool check_consistency) {

  // only run the calculation for atoms actually in the current domain
  auto options = torch::TensorOptions().dtype(torch::kInt32);
  auto selected_atoms_values = torch::zeros({n_particles, 2}, options);

  for (int64_t i = 0; i < n_particles; i++) {
    selected_atoms_values[i][0] = 0;
    selected_atoms_values[i][1] = static_cast<int32_t>(i);
  }
  auto selected_atoms = torch::make_intrusive<metatensor_torch::LabelsHolder>(
      std::vector<std::string>{"system", "atom"}, selected_atoms_values);
  evaluation_options->set_selected_atoms(selected_atoms->to(device));

  auto result_ivalue =
      model.forward({std::vector<metatensor_torch::System>{system},
                     evaluation_options, check_consistency});

  auto result = result_ivalue.toGenericDict();
  auto energy =
      result.at("energy").toCustomClass<metatensor_torch::TensorMapHolder>();
  return energy;
}

inline double get_energy(const metatensor_torch::TorchTensorMap &energy,
                         bool energy_is_per_atom) {
auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
auto energy_tensor = energy_block->values();
auto energy_detached =
energy_tensor.detach().to(torch::kCPU).to(torch::kFloat64);
auto energy_samples = energy_block->samples();

// store the energy returned by the model
torch::Tensor global_energy;
if (energy_is_per_atom) {
assert(energy_samples->size() == 2);
assert(energy_samples->names()[0] == "system");
assert(energy_samples->names()[1] == "atom");

auto samples_values = energy_samples->values().to(torch::kCPU);
auto samples = samples_values.accessor<int32_t, 2>();

    // `samples` (system/atom indices) maps each row of `energy_detached`
    // back to a particle; only the total energy is needed here
    static_cast<void>(samples);

    global_energy = energy_detached.sum(0);
    assert(global_energy.sizes() == std::vector<int64_t>({1}));
} else {
assert(energy_samples->size() == 1);
assert(energy_samples->names()[0] == "system");

assert(energy_detached.sizes() == std::vector<int64_t>({1, 1}));
global_energy = energy_detached.reshape({1});
}

return global_energy.item<double>();
}

inline torch::Tensor get_forces(const metatensor_torch::TorchTensorMap &energy,
                                metatensor_torch::System &system) {
// reset gradients to zero before calling backward
system->positions().mutable_grad() = torch::Tensor();

auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
auto energy_tensor = energy_block->values();

// compute forces/virial with backward propagation
energy_tensor.backward(-torch::ones_like(energy_tensor));
auto forces_tensor = system->positions().grad();
assert(forces_tensor.is_cpu() &&
forces_tensor.scalar_type() == torch::kFloat64);
return forces_tensor;
}
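
Similarly, a sketch of how run_model, get_energy and get_forces could be chained for a single evaluation; the TorchScript module, evaluation options and device are assumed to be prepared elsewhere (e.g. when the model is loaded), and the driver function and include path below are purely illustrative.

// Hypothetical driver, for illustration only.
#include "ml_metatensor/compute.hpp"

double energy_and_forces(
    torch::jit::Module &model, metatensor_torch::System &system,
    int64_t n_particles,
    const metatensor_torch::ModelEvaluationOptions &options,
    torch::Device device, torch::Tensor &forces_out) {
  // evaluate the model on the local particles and extract the "energy" output
  auto energy = run_model(model, system, n_particles, options, device,
                          /*check_consistency=*/true);

  // reduce to a single scalar, summing per-atom contributions if present ...
  auto const total_energy = get_energy(energy, /*energy_is_per_atom=*/true);

  // ... then back-propagate to obtain the forces (the negative gradient of
  // the energy with respect to the positions)
  forces_out = get_forces(energy, system);
  return total_energy;
}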