diff --git a/tmol/database/scoring/omega_bbdep.py b/tmol/database/scoring/omega_bbdep.py
index af5308c86..119e53c59 100644
--- a/tmol/database/scoring/omega_bbdep.py
+++ b/tmol/database/scoring/omega_bbdep.py
@@ -39,5 +39,4 @@ def from_file(cls, fname: str):
                 OmegaBBDepTables,
             ]
         ):
-            print("safe globals: ", torch.serialization.get_safe_globals())
             return torch.load(fname)
diff --git a/tmol/io/details/compiled/compiled.ops.cpp b/tmol/io/details/compiled/compiled.ops.cpp
index eafcdfd87..c0c8cefdc 100644
--- a/tmol/io/details/compiled/compiled.ops.cpp
+++ b/tmol/io/details/compiled/compiled.ops.cpp
@@ -226,6 +226,12 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("resolve_his_taut", &resolve_his_tautomerization);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("gen_pose_leaf_atoms", &pose_leaf_atom_gen_op);
+  m.def("resolve_his_taut", &resolve_his_tautomerization);
+}
+
 } // namespace compiled
 } // namespace details
 } // namespace io
diff --git a/tmol/io/details/compiled/compiled.py b/tmol/io/details/compiled/compiled.py
index f12213a3e..ed758198a 100644
--- a/tmol/io/details/compiled/compiled.py
+++ b/tmol/io/details/compiled/compiled.py
@@ -1,23 +1,19 @@
-import torch
-from tmol.utility.cpp_extension import load, modulename, relpaths, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "gen_pose_leaf_atoms.cpu.cpp",
-                "gen_pose_leaf_atoms.cuda.cu",
-                "resolve_his_taut.cpu.cpp",
-                "resolve_his_taut.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    [
+        "compiled.ops.cpp",
+        "gen_pose_leaf_atoms.cpu.cpp",
+        "gen_pose_leaf_atoms.cuda.cu",
+        "resolve_his_taut.cpu.cpp",
+        "resolve_his_taut.cuda.cu",
+    ],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
-gen_pose_leaf_atoms = _ops.gen_pose_leaf_atoms
-resolve_his_taut = _ops.resolve_his_taut
+functions = ["gen_pose_leaf_atoms", "resolve_his_taut"]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+gen_pose_leaf_atoms = loader.gen_pose_leaf_atoms
+resolve_his_taut = loader.resolve_his_taut
diff --git a/tmol/kinematics/compiled/__init__.py b/tmol/kinematics/compiled/__init__.py
index 097524af2..027df2519 100644
--- a/tmol/kinematics/compiled/__init__.py
+++ b/tmol/kinematics/compiled/__init__.py
@@ -1,4 +1,3 @@
-from .compiled_ops import forward_kin_op
-from .compiled_inverse_kin import inverse_kin
+from .compiled_ops import forward_kin_op, inverse_kin
 
 __all__ = ["forward_kin_op", "inverse_kin"]
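[Reviewer note] The pattern introduced above repeats throughout this patch: each extension keeps its `TORCH_LIBRARY_` registration (so a JIT build stays reachable through `torch.ops`) and gains a parallel `PYBIND11_MODULE` registration (so an ahead-of-time build of the same sources can be imported as an ordinary Python module), while the per-module `compiled.py` shims delegate to `TorchOpLoader` (defined in `tmol/utility/cpp_extension.py` at the end of this patch). A minimal sketch of the two access paths; the helper name, module name handling, and source list below are illustrative and not part of this diff:

```python
# Sketch only: resolve an op from a prebuilt extension if installed, else JIT-build it.
import importlib

import torch
from torch.utils.cpp_extension import load


def resolve_op(name, sources, fn):
    try:
        # Ahead-of-time path: PYBIND11_MODULE makes the built .so importable by name.
        return getattr(importlib.import_module(name), fn)
    except ModuleNotFoundError:
        # JIT path: build now and pull the op from the TORCH_LIBRARY_ registration.
        load(name, sources, is_python_module=False)
        return getattr(getattr(torch.ops, name), fn)
```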
diff --git a/tmol/kinematics/compiled/compiled_inverse_kin.cpp b/tmol/kinematics/compiled/compiled_inverse_kin.cpp
deleted file mode 100644
index 1ac428de3..000000000
--- a/tmol/kinematics/compiled/compiled_inverse_kin.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-#pragma once
-
-#include
-#include
-
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-
-#include "common_dispatch.hh"
-namespace tmol {
-namespace kinematics {
-
-// pybind-ings for inverse kinematics
-//   - not part of the evaluation graph but is used in setup
-template
-void bind_dispatch(pybind11::module& m) {
-  using namespace pybind11::literals;
-  using namespace tmol::utility::function_dispatch;
-
-  add_dispatch_impl(
-      m,
-      "inverse_kin",
-      &InverseKinDispatch::f,
-      "coords"_a,
-      "parent"_a,
-      "frame_x"_a,
-      "frame_y"_a,
-      "frame_z"_a,
-      "doftype"_a);
-};
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-  using namespace pybind11::literals;
-
-  bind_dispatch(m);
-  bind_dispatch(m);
-
-#ifdef WITH_CUDA
-  bind_dispatch(m);
-  bind_dispatch(m);
-#endif
-}
-
-} // namespace kinematics
-} // namespace tmol
diff --git a/tmol/kinematics/compiled/compiled_inverse_kin.py b/tmol/kinematics/compiled/compiled_inverse_kin.py
deleted file mode 100644
index 7c5a2fae8..000000000
--- a/tmol/kinematics/compiled/compiled_inverse_kin.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
-
-_compiled = load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            ["compiled_inverse_kin.cpp", "compiled.cpu.cpp", "compiled.cuda.cu"],
-        )
-    ),
-    is_python_module=True,
-)
-
-
-def inverse_kin(*args, **kwargs):
-    return _compiled.inverse_kin[(args[0].device.type, args[0].dtype)](*args, **kwargs)
diff --git a/tmol/kinematics/compiled/compiled_ops.cpp b/tmol/kinematics/compiled/compiled_ops.cpp
index d2f7ffc43..409eff4fb 100644
--- a/tmol/kinematics/compiled/compiled_ops.cpp
+++ b/tmol/kinematics/compiled/compiled_ops.cpp
@@ -19,6 +19,66 @@ using torch::autograd::AutogradContext;
 using torch::autograd::Function;
 using torch::autograd::tensor_list;
 
+class InverseKinematicOp
+    : public torch::autograd::Function {
+ public:
+  static Tensor forward(
+      AutogradContext* ctx,
+      Tensor coords,
+      Tensor parent,
+      Tensor frame_x,
+      Tensor frame_y,
+      Tensor frame_z,
+      Tensor doftype) {
+    at::Tensor ret;
+
+    using Int = int32_t;
+
+    TMOL_DISPATCH_FLOATING_DEVICE(coords.options(), "inverse_kin_op", ([&] {
+      using Real = scalar_t;
+      constexpr tmol::Device Dev = device_t;
+
+      auto result =
+          InverseKinDispatch::f(
+              TCAST(coords),
+              TCAST(parent),
+              TCAST(frame_x),
+              TCAST(frame_y),
+              TCAST(frame_z),
+              TCAST(doftype));
+      ret = result.tensor;
+    }));
+
+    // ctx->save_for_backward({HTs, dofs, nodes_b, scans_b, gens_b, kintree});
+
+    return ret;
+  }
+
+  static tensor_list backward(AutogradContext* ctx, tensor_list grad_outputs) {
+    // auto saved = ctx->get_saved_variables();
+
+    return {
+        torch::Tensor(),
+        torch::Tensor(),
+        torch::Tensor(),
+        torch::Tensor(),
+        torch::Tensor(),
+        torch::Tensor(),
+    };
+  }
+};
+
+Tensor inverse_kin_op(
+    Tensor coords,
+    Tensor parent,
+    Tensor frame_x,
+    Tensor frame_y,
+    Tensor frame_z,
+    Tensor doftype) {
+  return InverseKinematicOp::apply(
+      coords, parent, frame_x, frame_y, frame_z, doftype);
+}
+
 class KinematicOp : public torch::autograd::Function {
  public:
   static Tensor forward(
@@ -611,5 +671,24 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("minimizer_map_from_movemap", &minimizer_map_from_movemap);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("forward_kin_op", &kinematic_op);
+  m.def("forward_only_op", &forward_only_op);
+  m.def("get_kfo_indices_for_atoms", &get_kfo_indices_for_atoms);
+  m.def("get_kfo_atom_parents", &get_kfo_atom_parents);
+  m.def("get_children", &get_children);
+  m.def("get_id_and_frame_xyz", &get_id_and_frame_xyz);
+  m.def("calculate_ff_edge_delays", &calculate_ff_edge_delays);
+  m.def("get_jump_atom_indices", &get_jump_atom_indices);
+  m.def(
+      "get_block_parent_connectivity_from_toposort",
+      &get_block_parent_connectivity_from_toposort);
+  m.def("get_kinforest_scans_from_stencils", &get_scans2);
+  m.def("get_kinforest_scans_from_stencils2", &get_scans2);
+  m.def("minimizer_map_from_movemap", &minimizer_map_from_movemap);
+  m.def("inverse_kin", &inverse_kin_op);
+}
+
 } // namespace kinematics
 } // namespace tmol
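[Reviewer note] `InverseKinematicOp` above only routes the existing `InverseKinDispatch` through the op registry so that `inverse_kin` can live next to the other kinematics ops; its `backward` returns empty tensors, so no gradients flow through it, consistent with the comment in the deleted pybind file that inverse kinematics is used in setup rather than in the evaluation graph. A hedged call-site sketch follows; the tensor shapes, dtypes, and values are placeholders, not taken from this diff:

```python
# Illustrative only: stand-in tensors for a real KinForest; argument order
# follows the inverse_kin_op signature registered above.
import torch
from tmol.kinematics.compiled.compiled_ops import inverse_kin

n = 8
coords = torch.zeros((n, 3), dtype=torch.float32)
parent = torch.zeros(n, dtype=torch.int32)  # dtype assumed; real callers pass KinForest fields
frame_x = torch.zeros(n, dtype=torch.int32)
frame_y = torch.zeros(n, dtype=torch.int32)
frame_z = torch.zeros(n, dtype=torch.int32)
doftype = torch.zeros(n, dtype=torch.int32)

# Returns the measured DOFs; InverseKinematicOp::backward returns undefined
# gradients, so the result should be treated as non-differentiable setup data.
dofs = inverse_kin(coords, parent, frame_x, frame_y, frame_z, doftype)
```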
diff --git a/tmol/kinematics/compiled/compiled_ops.py b/tmol/kinematics/compiled/compiled_ops.py
index 66cd38881..2c91a2c34 100644
--- a/tmol/kinematics/compiled/compiled_ops.py
+++ b/tmol/kinematics/compiled/compiled_ops.py
@@ -1,26 +1,39 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(__file__, ["compiled_ops.cpp", "compiled.cpu.cpp", "compiled.cuda.cu"])
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__, ["compiled_ops.cpp", "compiled.cpu.cpp", "compiled.cuda.cu"]
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
-forward_kin_op = _ops.forward_kin_op
-forward_only_op = _ops.forward_only_op
-get_kfo_indices_for_atoms = _ops.get_kfo_indices_for_atoms
-get_kfo_atom_parents = _ops.get_kfo_atom_parents
-get_children = _ops.get_children
-get_id_and_frame_xyz = _ops.get_id_and_frame_xyz
-calculate_ff_edge_delays = _ops.calculate_ff_edge_delays
-get_jump_atom_indices = _ops.get_jump_atom_indices
+functions = [
+    "forward_kin_op",
+    "forward_only_op",
+    "get_kfo_indices_for_atoms",
+    "get_kfo_atom_parents",
+    "get_children",
+    "get_id_and_frame_xyz",
+    "calculate_ff_edge_delays",
+    "get_jump_atom_indices",
+    "get_block_parent_connectivity_from_toposort",
+    "get_kinforest_scans_from_stencils",
+    "get_kinforest_scans_from_stencils2",
+    "minimizer_map_from_movemap",
+    "inverse_kin",
+]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+forward_kin_op = loader.forward_kin_op
+forward_only_op = loader.forward_only_op
+get_kfo_indices_for_atoms = loader.get_kfo_indices_for_atoms
+get_kfo_atom_parents = loader.get_kfo_atom_parents
+get_children = loader.get_children
+get_id_and_frame_xyz = loader.get_id_and_frame_xyz
+calculate_ff_edge_delays = loader.calculate_ff_edge_delays
+get_jump_atom_indices = loader.get_jump_atom_indices
 get_block_parent_connectivity_from_toposort = (
-    _ops.get_block_parent_connectivity_from_toposort
+    loader.get_block_parent_connectivity_from_toposort
 )
-get_kinforest_scans_from_stencils = _ops.get_kinforest_scans_from_stencils
-get_kinforest_scans_from_stencils2 = _ops.get_kinforest_scans_from_stencils2
-minimizer_map_from_movemap = _ops.minimizer_map_from_movemap
+get_kinforest_scans_from_stencils = loader.get_kinforest_scans_from_stencils
+get_kinforest_scans_from_stencils2 = loader.get_kinforest_scans_from_stencils2
+minimizer_map_from_movemap = loader.minimizer_map_from_movemap
+inverse_kin = loader.inverse_kin
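[Reviewer note] With this change, importing `compiled_ops.py` (or any of the analogous `compiled.py` shims below) no longer compiles anything: `TorchOpLoader` binds each exported name to a `functools.partial`, and the extension is imported or JIT-built only on the first call (see `check_load_and_run`/`load_mod` near the end of this patch). A stripped-down stand-in for that idea, not the tmol class itself:

```python
# Hypothetical minimal version of the lazy-binding pattern used by TorchOpLoader.
from functools import partial


class LazyOps:
    def __init__(self, functions, build_module):
        self._build_module = build_module  # callable that imports or JIT-builds the extension
        self._module = None
        for fn in functions:
            # Attribute access is cheap; the build cost is paid on the first call.
            setattr(self, fn, partial(self._call, fn))

    def _call(self, fn, *args, **kwargs):
        if self._module is None:
            self._module = self._build_module()
        return getattr(self._module, fn)(*args, **kwargs)
```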
diff --git a/tmol/numeric/bspline_compiled/compiled.py b/tmol/numeric/bspline_compiled/compiled.py
index ea7e1c167..b32fbe6a0 100644
--- a/tmol/numeric/bspline_compiled/compiled.py
+++ b/tmol/numeric/bspline_compiled/compiled.py
@@ -1,11 +1,21 @@
-from tmol.utility.cpp_extension import load, relpaths, modulename
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-# only CPU is exposed via pybind
-_compiled = load(modulename(__name__), relpaths(__file__, ["bspline.pybind.cpp"]))
+sources = relpaths(__file__, ["bspline.pybind.cpp"])
 
-computeCoeffs2 = _compiled.computeCoeffs2
-interpolate2 = _compiled.interpolate2
-computeCoeffs3 = _compiled.computeCoeffs3
-interpolate3 = _compiled.interpolate3
-computeCoeffs4 = _compiled.computeCoeffs4
-interpolate4 = _compiled.interpolate4
+functions = [
+    "computeCoeffs2",
+    "interpolate2",
+    "computeCoeffs3",
+    "interpolate3",
+    "computeCoeffs4",
+    "interpolate4",
+]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+computeCoeffs2 = loader.computeCoeffs2
+interpolate2 = loader.interpolate2
+computeCoeffs3 = loader.computeCoeffs3
+interpolate3 = loader.interpolate3
+computeCoeffs4 = loader.computeCoeffs4
+interpolate4 = loader.interpolate4
diff --git a/tmol/pack/rotamer/build_rotamers.py b/tmol/pack/rotamer/build_rotamers.py
index 307e46779..de3424ff8 100644
--- a/tmol/pack/rotamer/build_rotamers.py
+++ b/tmol/pack/rotamer/build_rotamers.py
@@ -414,7 +414,7 @@ def _t(arr):
 def measure_dofs_from_orig_coords(
     coords: Tensor[torch.float32][:, :, :], kinforest: KinForest
 ):
-    from tmol.kinematics.compiled.compiled_inverse_kin import inverse_kin
+    from tmol.kinematics.compiled.compiled_ops import inverse_kin
 
     kinforest_coords = coords.view(-1, 3)[kinforest.id.to(torch.int64)]
     kinforest_coords[0, :] = 0  # reset root
diff --git a/tmol/pack/rotamer/dunbrack/compiled.ops.cpp b/tmol/pack/rotamer/dunbrack/compiled.ops.cpp
index 86637af8f..2360edf62 100644
--- a/tmol/pack/rotamer/dunbrack/compiled.ops.cpp
+++ b/tmol/pack/rotamer/dunbrack/compiled.ops.cpp
@@ -150,6 +150,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("dun_sample_chi", &dun_sample_chi);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("dun_sample_chi", &dun_sample_chi);
+}
+
 } // namespace dunbrack
 } // namespace rotamer
 } // namespace pack
diff --git a/tmol/pack/rotamer/dunbrack/compiled.py b/tmol/pack/rotamer/dunbrack/compiled.py
index 93cbe1a97..f2240546c 100644
--- a/tmol/pack/rotamer/dunbrack/compiled.py
+++ b/tmol/pack/rotamer/dunbrack/compiled.py
@@ -1,15 +1,11 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(__file__, ["compiled.ops.cpp", "compiled.cpu.cpp", "compiled.cuda.cu"])
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__, ["compiled.ops.cpp", "compiled.cpu.cpp", "compiled.cuda.cu"]
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = ["dun_sample_chi"]
+
+loader = TorchOpLoader(__name__, sources, functions)
 
-dun_sample_chi = _ops.dun_sample_chi
+dun_sample_chi = loader.dun_sample_chi
diff --git a/tmol/pack/rotamer/single_residue_kinforest.py b/tmol/pack/rotamer/single_residue_kinforest.py
index b923a15a1..1b3034a7f 100644
--- a/tmol/pack/rotamer/single_residue_kinforest.py
+++ b/tmol/pack/rotamer/single_residue_kinforest.py
@@ -7,7 +7,7 @@
 from tmol.kinematics.scan_ordering import KinForestScanOrdering
 from tmol.kinematics.old.builder import _KinematicBuilder
 
-from tmol.kinematics.compiled.compiled_inverse_kin import inverse_kin
+from tmol.kinematics.compiled.compiled_ops import inverse_kin
 
 from tmol.chemical.restypes import RefinedResidueType
 from tmol.pose.packed_block_types import PackedBlockTypes
diff --git a/tmol/pack/sim_anneal/compiled/compiled.ops.cpp b/tmol/pack/sim_anneal/compiled/compiled.ops.cpp
index eee369bce..d9558f312 100644
--- a/tmol/pack/sim_anneal/compiled/compiled.ops.cpp
+++ b/tmol/pack/sim_anneal/compiled/compiled.ops.cpp
@@ -334,6 +334,25 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("run_sim_annealing", &run_sim_annealing);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def(
+      "pick_random_rotamers",
+      &pick_random_rotamers);
+  m.def(
+      "metropolis_accept_reject",
+      &metropolis_accept_reject);
+  m.def("create_sim_annealer", &create_sim_annealer);
m.def("delete_sim_annealer", &delete_sim_annealer); + m.def( + "register_standard_random_rotamer_picker", + ®ister_standard_random_rotamer_picker); + m.def( + "register_standard_metropolis_accept_or_rejector", + ®ister_standard_metropolis_accept_or_rejector); + m.def("run_sim_annealing", &run_sim_annealing); +} + } // namespace compiled } // namespace sim_anneal } // namespace pack diff --git a/tmol/pack/sim_anneal/compiled/compiled.py b/tmol/pack/sim_anneal/compiled/compiled.py index e7dd9f186..7abd27c63 100644 --- a/tmol/pack/sim_anneal/compiled/compiled.py +++ b/tmol/pack/sim_anneal/compiled/compiled.py @@ -1,31 +1,34 @@ -import torch -from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available +from tmol.utility.cpp_extension import relpaths, TorchOpLoader -load( - modulename(__name__), - cuda_if_available( - relpaths( - __file__, - [ - "annealer.cpu.cpp", - "annealer.cuda.cu", - "compiled.ops.cpp", - "compiled.cpu.cpp", - "compiled.cuda.cu", - ], - ) - ), - is_python_module=False, +sources = relpaths( + __file__, + [ + "annealer.cpu.cpp", + "annealer.cuda.cu", + "compiled.ops.cpp", + "compiled.cpu.cpp", + "compiled.cuda.cu", + ], ) -_ops = getattr(torch.ops, modulename(__name__)) +functions = [ + "pick_random_rotamers", + "metropolis_accept_reject", + "create_sim_annealer", + "delete_sim_annealer", + "register_standard_random_rotamer_picker", + "register_standard_metropolis_accept_or_rejector", + "run_sim_annealing", +] -pick_random_rotamers = _ops.pick_random_rotamers -metropolis_accept_reject = _ops.metropolis_accept_reject -create_sim_annealer = _ops.create_sim_annealer -delete_sim_annealer = _ops.delete_sim_annealer -register_standard_random_rotamer_picker = _ops.register_standard_random_rotamer_picker +loader = TorchOpLoader(__name__, sources, functions) + +pick_random_rotamers = loader.pick_random_rotamers +metropolis_accept_reject = loader.metropolis_accept_reject +create_sim_annealer = loader.create_sim_annealer +delete_sim_annealer = loader.delete_sim_annealer +register_standard_random_rotamer_picker = loader.register_standard_random_rotamer_picker register_standard_metropolis_accept_or_rejector = ( - _ops.register_standard_metropolis_accept_or_rejector + loader.register_standard_metropolis_accept_or_rejector ) -run_sim_annealing = _ops.run_sim_annealing +run_sim_annealing = loader.run_sim_annealing diff --git a/tmol/pose/compiled/apsp_ops.py b/tmol/pose/compiled/apsp_ops.py index 729ef6054..ec08f5d79 100644 --- a/tmol/pose/compiled/apsp_ops.py +++ b/tmol/pose/compiled/apsp_ops.py @@ -1,15 +1,12 @@ import torch -from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available -load( - modulename(__name__), - cuda_if_available( - relpaths(__file__, ["apsp_vestibule.ops.cpp", "apsp.cpu.cpp", "apsp.cuda.cu"]) - ), - is_python_module=False, -) +from tmol.utility.cpp_extension import relpaths, TorchOpLoader -_ops = getattr(torch.ops, modulename(__name__)) +sources = relpaths(__file__, ["apsp_vestibule.ops.cpp", "apsp.cpu.cpp", "apsp.cuda.cu"]) + +functions = ["apsp_op"] + +loader = TorchOpLoader(__name__, sources, functions) def stacked_apsp(weights, threshold=-1): @@ -26,6 +23,6 @@ def stacked_apsp(weights, threshold=-1): thresholded for the GPU version as its implementation does not respect the threshold parameter. 
""" - _ops.apsp_op(weights, threshold) + loader.apsp_op(weights, threshold) if threshold > 0 and weights.device != torch.device("cpu"): weights[weights > threshold] = threshold diff --git a/tmol/pose/compiled/apsp_vestibule.ops.cpp b/tmol/pose/compiled/apsp_vestibule.ops.cpp index 1b92ee678..3132171e2 100644 --- a/tmol/pose/compiled/apsp_vestibule.ops.cpp +++ b/tmol/pose/compiled/apsp_vestibule.ops.cpp @@ -31,5 +31,8 @@ void apsp_op(Tensor stacked_distances, int64_t cutoff) { TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) { m.def("apsp_op", &apsp_op); } +// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m) +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("apsp_op", &apsp_op); } + } // namespace pose } // namespace tmol diff --git a/tmol/score/backbone_torsion/potentials/compiled.ops.cpp b/tmol/score/backbone_torsion/potentials/compiled.ops.cpp index 969686ad2..facfe78ff 100644 --- a/tmol/score/backbone_torsion/potentials/compiled.ops.cpp +++ b/tmol/score/backbone_torsion/potentials/compiled.ops.cpp @@ -249,6 +249,13 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) { &backbone_torsion_pose_score_op); } +// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m) +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def( + "backbone_torsion_pose_score", + &backbone_torsion_pose_score_op); +} + } // namespace potentials } // namespace backbone_torsion } // namespace score diff --git a/tmol/score/backbone_torsion/potentials/compiled.py b/tmol/score/backbone_torsion/potentials/compiled.py index abfaa7279..1142711c8 100644 --- a/tmol/score/backbone_torsion/potentials/compiled.py +++ b/tmol/score/backbone_torsion/potentials/compiled.py @@ -1,22 +1,16 @@ -import torch -from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available +from tmol.utility.cpp_extension import relpaths, TorchOpLoader - -_compiled = load( - modulename(__name__), - cuda_if_available( - relpaths( - __file__, - [ - "compiled.ops.cpp", - "backbone_torsion_pose_score.cpu.cpp", - "backbone_torsion_pose_score.cuda.cu", - ], - ) - ), - is_python_module=False, +sources = relpaths( + __file__, + [ + "compiled.ops.cpp", + "backbone_torsion_pose_score.cpu.cpp", + "backbone_torsion_pose_score.cuda.cu", + ], ) -_ops = getattr(torch.ops, modulename(__name__)) +functions = ["backbone_torsion_pose_score"] + +loader = TorchOpLoader(__name__, sources, functions) -backbone_torsion_pose_score = _ops.backbone_torsion_pose_score +backbone_torsion_pose_score = loader.backbone_torsion_pose_score diff --git a/tmol/score/cartbonded/potentials/compiled.ops.cpp b/tmol/score/cartbonded/potentials/compiled.ops.cpp index 4a84814e9..4e127fda5 100644 --- a/tmol/score/cartbonded/potentials/compiled.ops.cpp +++ b/tmol/score/cartbonded/potentials/compiled.ops.cpp @@ -229,6 +229,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) { m.def("cartbonded_pose_scores", &cartbonded_pose_scores_op); } +// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m) +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("cartbonded_pose_scores", &cartbonded_pose_scores_op); +} + } // namespace potentials } // namespace cartbonded } // namespace score diff --git a/tmol/score/cartbonded/potentials/compiled.py b/tmol/score/cartbonded/potentials/compiled.py index c774d7536..c6f1816fa 100644 --- a/tmol/score/cartbonded/potentials/compiled.py +++ b/tmol/score/cartbonded/potentials/compiled.py @@ -1,20 +1,16 @@ -import torch -from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available +from tmol.utility.cpp_extension import relpaths, TorchOpLoader -load( - 
diff --git a/tmol/score/cartbonded/potentials/compiled.py b/tmol/score/cartbonded/potentials/compiled.py
index c774d7536..c6f1816fa 100644
--- a/tmol/score/cartbonded/potentials/compiled.py
+++ b/tmol/score/cartbonded/potentials/compiled.py
@@ -1,20 +1,16 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "cartbonded_pose_score.cpu.cpp",
-                "cartbonded_pose_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    [
+        "compiled.ops.cpp",
+        "cartbonded_pose_score.cpu.cpp",
+        "cartbonded_pose_score.cuda.cu",
+    ],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
-cartbonded_pose_scores = _ops.cartbonded_pose_scores
+functions = ["cartbonded_pose_scores"]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+cartbonded_pose_scores = loader.cartbonded_pose_scores
diff --git a/tmol/score/constraint/potentials/compiled.ops.cpp b/tmol/score/constraint/potentials/compiled.ops.cpp
index 9b5d010dc..c1f6c08ce 100644
--- a/tmol/score/constraint/potentials/compiled.ops.cpp
+++ b/tmol/score/constraint/potentials/compiled.ops.cpp
@@ -92,6 +92,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("get_torsion_angle", &get_torsion_angle_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("get_torsion_angle", &get_torsion_angle_op);
+}
+
 } // namespace potentials
 } // namespace constraint
 } // namespace score
diff --git a/tmol/score/constraint/potentials/compiled.py b/tmol/score/constraint/potentials/compiled.py
index 9c4555fa0..0a762e6bf 100644
--- a/tmol/score/constraint/potentials/compiled.py
+++ b/tmol/score/constraint/potentials/compiled.py
@@ -1,20 +1,12 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "constraint_score.cpu.cpp",
-                "constraint_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    ["compiled.ops.cpp", "constraint_score.cpu.cpp", "constraint_score.cuda.cu"],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
-get_torsion_angle = _ops.get_torsion_angle
+functions = ["get_torsion_angle"]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+get_torsion_angle = loader.get_torsion_angle
diff --git a/tmol/score/disulfide/potentials/compiled.ops.cpp b/tmol/score/disulfide/potentials/compiled.ops.cpp
index 1f13f504a..d7a44bcaf 100644
--- a/tmol/score/disulfide/potentials/compiled.ops.cpp
+++ b/tmol/score/disulfide/potentials/compiled.ops.cpp
@@ -186,6 +186,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("disulfide_pose_scores", &disulfide_pose_scores_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("disulfide_pose_scores", &disulfide_pose_scores_op);
+}
+
 } // namespace potentials
 } // namespace disulfide
 } // namespace score
diff --git a/tmol/score/disulfide/potentials/compiled.py b/tmol/score/disulfide/potentials/compiled.py
index 566db584e..6b2628b15 100644
--- a/tmol/score/disulfide/potentials/compiled.py
+++ b/tmol/score/disulfide/potentials/compiled.py
@@ -1,21 +1,16 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "disulfide_pose_score.cpu.cpp",
-                "disulfide_pose_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    [
+        "compiled.ops.cpp",
+        "disulfide_pose_score.cpu.cpp",
+        "disulfide_pose_score.cuda.cu",
+    ],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = ["disulfide_pose_scores"]
 
-disulfide_pose_scores = _ops.disulfide_pose_scores
+loader = TorchOpLoader(__name__, sources, functions)
+
+disulfide_pose_scores = loader.disulfide_pose_scores
diff --git a/tmol/score/dunbrack/potentials/compiled.ops.cpp b/tmol/score/dunbrack/potentials/compiled.ops.cpp
index 719ed02f6..35ce4b8ab 100644
--- a/tmol/score/dunbrack/potentials/compiled.ops.cpp
+++ b/tmol/score/dunbrack/potentials/compiled.ops.cpp
@@ -421,6 +421,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("dunbrack_pose_scores", &dunbrack_pose_scores_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("dunbrack_pose_scores", &dunbrack_pose_scores_op);
+}
+
 } // namespace potentials
 } // namespace dunbrack
 } // namespace score
diff --git a/tmol/score/dunbrack/potentials/compiled.py b/tmol/score/dunbrack/potentials/compiled.py
index 6bd3f6a37..cf22677f8 100644
--- a/tmol/score/dunbrack/potentials/compiled.py
+++ b/tmol/score/dunbrack/potentials/compiled.py
@@ -1,21 +1,12 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "dunbrack_pose_score.cpu.cpp",
-                "dunbrack_pose_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    ["compiled.ops.cpp", "dunbrack_pose_score.cpu.cpp", "dunbrack_pose_score.cuda.cu"],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = ["dunbrack_pose_scores"]
 
-dunbrack_pose_scores = _ops.dunbrack_pose_scores
+loader = TorchOpLoader(__name__, sources, functions)
+
+dunbrack_pose_scores = loader.dunbrack_pose_scores
diff --git a/tmol/score/elec/potentials/compiled.ops.cpp b/tmol/score/elec/potentials/compiled.ops.cpp
index 0dad0d6b0..dbc864249 100644
--- a/tmol/score/elec/potentials/compiled.ops.cpp
+++ b/tmol/score/elec/potentials/compiled.ops.cpp
@@ -248,6 +248,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("elec_pose_scores", &elec_pose_scores_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("elec_pose_scores", &elec_pose_scores_op);
+}
+
 } // namespace potentials
 } // namespace elec
 } // namespace score
diff --git a/tmol/score/elec/potentials/compiled.py b/tmol/score/elec/potentials/compiled.py
index 1286e9d6c..b0b05fa9c 100644
--- a/tmol/score/elec/potentials/compiled.py
+++ b/tmol/score/elec/potentials/compiled.py
@@ -1,21 +1,11 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "elec_pose_score.cpu.cpp",
-                "elec_pose_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__, ["compiled.ops.cpp", "elec_pose_score.cpu.cpp", "elec_pose_score.cuda.cu"]
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = ["elec_pose_scores"]
 
-elec_pose_scores = _ops.elec_pose_scores
+loader = TorchOpLoader(__name__, sources, functions)
+
+elec_pose_scores = loader.elec_pose_scores
diff --git a/tmol/score/hbond/potentials/compiled.ops.cpp b/tmol/score/hbond/potentials/compiled.ops.cpp
index e418d3cc7..f568bf515 100644
--- a/tmol/score/hbond/potentials/compiled.ops.cpp
+++ b/tmol/score/hbond/potentials/compiled.ops.cpp
@@ -342,6 +342,11 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("hbond_pose_scores", &hbond_pose_scores_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("hbond_pose_scores", &hbond_pose_scores_op);
+}
+
 } // namespace potentials
 } // namespace hbond
 } // namespace score
diff --git a/tmol/score/hbond/potentials/compiled.py b/tmol/score/hbond/potentials/compiled.py
index ff5879589..4c102574a 100644
--- a/tmol/score/hbond/potentials/compiled.py
+++ b/tmol/score/hbond/potentials/compiled.py
@@ -1,22 +1,12 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "hbond_pose_score.cpu.cpp",
-                "hbond_pose_score.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    ["compiled.ops.cpp", "hbond_pose_score.cpu.cpp", "hbond_pose_score.cuda.cu"],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = ["hbond_pose_scores"]
+
+loader = TorchOpLoader(__name__, sources, functions)
 
-hbond_pose_scores = _ops.hbond_pose_scores
+hbond_pose_scores = loader.hbond_pose_scores
diff --git a/tmol/score/ljlk/potentials/compiled.ops.cpp b/tmol/score/ljlk/potentials/compiled.ops.cpp
index 37e4f1e42..c15d9ea43 100644
--- a/tmol/score/ljlk/potentials/compiled.ops.cpp
+++ b/tmol/score/ljlk/potentials/compiled.ops.cpp
@@ -407,6 +407,15 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
       &register_lj_lk_rotamer_pair_energy_eval);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("ljlk_pose_scores", &ljlk_pose_scores_op);
+  m.def("score_ljlk_inter_system_scores", &rotamer_pair_energies_op);
+  m.def(
+      "register_lj_lk_rotamer_pair_energy_eval",
+      &register_lj_lk_rotamer_pair_energy_eval);
+}
+
 } // namespace potentials
 } // namespace ljlk
 } // namespace score
diff --git a/tmol/score/ljlk/potentials/compiled.py b/tmol/score/ljlk/potentials/compiled.py
index 340d02c32..7fd2a97ad 100644
--- a/tmol/score/ljlk/potentials/compiled.py
+++ b/tmol/score/ljlk/potentials/compiled.py
@@ -1,28 +1,24 @@
-import torch
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "ljlk_pose_score.cpu.cpp",
-                "ljlk_pose_score.cuda.cu",
-                "rotamer_pair_energy_lj.cpu.cpp",
-                "rotamer_pair_energy_lj.cuda.cu",
-                # "rotamer_pair_energy_lk.cpu.cpp",
-                # "rotamer_pair_energy_lk.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    [
+        "compiled.ops.cpp",
+        "ljlk_pose_score.cpu.cpp",
+        "ljlk_pose_score.cuda.cu",
+        "rotamer_pair_energy_lj.cpu.cpp",
+        "rotamer_pair_energy_lj.cuda.cu",
+    ],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
+functions = [
+    "ljlk_pose_scores",
+    "score_ljlk_inter_system_scores",
+    "register_lj_lk_rotamer_pair_energy_eval",
+]
+
+loader = TorchOpLoader(__name__, sources, functions)
 
-ljlk_pose_scores = _ops.ljlk_pose_scores
-score_ljlk_inter_system_scores = _ops.score_ljlk_inter_system_scores
-register_lj_lk_rotamer_pair_energy_eval = _ops.register_lj_lk_rotamer_pair_energy_eval
+ljlk_pose_scores = loader.ljlk_pose_scores
+score_ljlk_inter_system_scores = loader.score_ljlk_inter_system_scores
+register_lj_lk_rotamer_pair_energy_eval = loader.register_lj_lk_rotamer_pair_energy_eval
diff --git a/tmol/score/lk_ball/potentials/compiled.ops.cpp b/tmol/score/lk_ball/potentials/compiled.ops.cpp
index a4495e6a1..1f059db8a 100644
--- a/tmol/score/lk_ball/potentials/compiled.ops.cpp
+++ b/tmol/score/lk_ball/potentials/compiled.ops.cpp
@@ -598,6 +598,15 @@ TORCH_LIBRARY_(TORCH_EXTENSION_NAME, m) {
   m.def("gen_pose_waters", &pose_watergen_op);
 }
 
+// #define PYBIND11_MODULE_(ns, m) PYBIND11_MODULE(ns, m)
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def(
+      "score_lkball_inter_system_scores",
+      &rotamer_pair_energies);
+  m.def("lk_ball_pose_score", &lkball_pose_score);
+  m.def("gen_pose_waters", &pose_watergen_op);
+}
+
 } // namespace potentials
 } // namespace lk_ball
 } // namespace score
diff --git a/tmol/score/lk_ball/potentials/compiled.py b/tmol/score/lk_ball/potentials/compiled.py
index 10d8c0e04..e3c078dd4 100644
--- a/tmol/score/lk_ball/potentials/compiled.py
+++ b/tmol/score/lk_ball/potentials/compiled.py
@@ -1,25 +1,21 @@
-import torch
-from tmol.utility.cpp_extension import load, modulename, relpaths, cuda_if_available
+from tmol.utility.cpp_extension import relpaths, TorchOpLoader
 
-load(
-    modulename(__name__),
-    cuda_if_available(
-        relpaths(
-            __file__,
-            [
-                "compiled.ops.cpp",
-                "lk_ball_pose_score.cpu.cpp",
-                "lk_ball_pose_score.cuda.cu",
-                "rotamer_pair_energy_lkball.cpu.cpp",
-                "rotamer_pair_energy_lkball.cuda.cu",
-                "gen_pose_waters.cpu.cpp",
-                "gen_pose_waters.cuda.cu",
-            ],
-        )
-    ),
-    is_python_module=False,
+sources = relpaths(
+    __file__,
+    [
+        "compiled.ops.cpp",
+        "lk_ball_pose_score.cpu.cpp",
+        "lk_ball_pose_score.cuda.cu",
+        "rotamer_pair_energy_lkball.cpu.cpp",
+        "rotamer_pair_energy_lkball.cuda.cu",
+        "gen_pose_waters.cpu.cpp",
+        "gen_pose_waters.cuda.cu",
+    ],
 )
 
-_ops = getattr(torch.ops, modulename(__name__))
-gen_pose_waters = _ops.gen_pose_waters
-pose_score_lk_ball = _ops.lk_ball_pose_score
+functions = ["gen_pose_waters", "lk_ball_pose_score"]
+
+loader = TorchOpLoader(__name__, sources, functions)
+
+gen_pose_waters = loader.gen_pose_waters
+pose_score_lk_ball = loader.lk_ball_pose_score
diff --git a/tmol/tests/pack/rotamer/test_build_rotamers.py b/tmol/tests/pack/rotamer/test_build_rotamers.py
index 421c62469..1bdd98597 100644
--- a/tmol/tests/pack/rotamer/test_build_rotamers.py
+++ b/tmol/tests/pack/rotamer/test_build_rotamers.py
@@ -309,7 +309,7 @@ def it(val, arr):
     met_kt_frame_y = it(0, met_rt.rotamer_kinforest.frame_y + 1)
     met_kt_frame_z = it(0, met_rt.rotamer_kinforest.frame_z + 1)
 
-    from tmol.kinematics.compiled.compiled_inverse_kin import inverse_kin
+    from tmol.kinematics.compiled.compiled_ops import inverse_kin
 
     coords = torch.cat(
         (
diff --git a/tmol/tests/utility/cpp_extension/test_cpp_extension.py b/tmol/tests/utility/cpp_extension/test_cpp_extension.py
index 1dca01dd4..53140c0af 100644
--- a/tmol/tests/utility/cpp_extension/test_cpp_extension.py
+++ b/tmol/tests/utility/cpp_extension/test_cpp_extension.py
@@ -2,7 +2,13 @@
 
 import torch
 
-from tmol.utility.cpp_extension import load, relpaths, modulename, cuda_if_available
+from tmol.utility.cpp_extension import (
+    load,
+    relpaths,
+    modulename,
+    cuda_if_available,
+    get_prebuild_extensions,
+)
 
 from tmol.tests.torch import requires_cuda
 
@@ -68,3 +74,10 @@ def test_hybrid_nocuda():
     if torch.cuda.is_available():
         with pytest.raises(TypeError):
             extension_nocuda.sum(torch.ones(10, device="cuda"))
+
+
+def test_get_prebuild_extensions_smoke():
+    """
+    Just make sure this function runs. Should probably inspect the output for validity with a better test
+    """
+    get_prebuild_extensions()
diff --git a/tmol/utility/cpp_extension.py b/tmol/utility/cpp_extension.py
index 0d46f1448..ea3fbb035 100644
--- a/tmol/utility/cpp_extension.py
+++ b/tmol/utility/cpp_extension.py
@@ -1,7 +1,9 @@
 import pathlib
 import os
-from functools import wraps
+import sys
+from functools import wraps, partial
 import warnings
+import importlib
 
 from ..extern import include_paths as extern_include_paths
 from .. import include_paths as tmol_include_paths
@@ -141,6 +143,12 @@ def relpaths(src_path, paths):
     return [str(pathlib.Path(src_path).parent / s) for s in paths]
 
 
+def actual_relpaths(src_path, paths):
+    if isinstance(paths, (str, bytes)):
+        paths = [paths]
+    return [str(pathlib.Path(src_path).parent / s) for s in paths]
+
+
 def modulename(src_name):
     """Adapt module name to valid cpp extension name.
@@ -151,3 +159,97 @@ def modulename(src_name):
     """
 
     return src_name.replace(".", "_")
+
+
+def get_prebuild_extensions():
+
+    from pkgutil import iter_modules
+    from setuptools import find_packages
+
+    name_filter = {"compiled", "compiled_ops", "apsp_ops"}
+
+    compiled_modules = []
+    for pkg in find_packages("."):
+        pkgpath = "." + "/" + pkg.replace(".", "/")
+        for info in iter_modules([pkgpath]):
+            if not info.ispkg and info.name in name_filter and "test" not in pkgpath:
+                module_info = {"name": pkg + "." + info.name}
+                # print(module_info)
+
+                importlib.import_module(module_info["name"])
+                func = getattr(
+                    getattr(sys.modules[module_info["name"]], "loader"),
+                    "build_CUDA_extension",
+                )
+                compiled_modules.append(func("/home/jflat06/rosetta/tmol"))
+
+    return compiled_modules
+
+
+class TorchOpLoader:
+    _suffix = "_internal"
+
+    def __init__(self, fname, sources, functions):
+        self.name = modulename(fname + "_CUDA")
+        self.sources = sources
+        self.functions = functions
+
+        for function in self.functions:
+            setattr(self, function, partial(self.check_load_and_run, function))
+
+        pass
+
+    def check_load_and_run(self, function, *args, **kwargs):
+        if not hasattr(self, function + self._suffix):
+            self.load_mod()
+
+        return getattr(self, function + self._suffix)(*args, **kwargs)
+
+    def build_CUDA_extension(self, f, **kwargs):
+        src = [os.path.relpath(s, f) for s in self.sources]
+        # print(src)
+
+        kwargs = _augment_kwargs(self.name, src, **kwargs)
+        kwargs["extra_cuda_cflags"] += ["-gencode=arch=compute_75,code=sm_75"]
+        extra_args = {
+            "cxx": kwargs["extra_cflags"],
+            "nvcc": kwargs["extra_cuda_cflags"],
+        }
+
+        # print("KWARGS")
+        # print(kwargs)
+
+        cuda_ext = torch.utils.cpp_extension.CUDAExtension(
+            name=self.name,
+            sources=src,
+            include_dirs=_default_include_paths,
+            extra_compile_args=extra_args,
+        )
+
+        return cuda_ext
+
+    def jit_load(self):
+        load(self.name, cuda_if_available(self.sources), is_python_module=True)
+
+    def load_mod(self):
+        try:
+            importlib.import_module(self.name)
+
+            for function in self.functions:
+                setattr(
+                    self,
+                    function + self._suffix,
+                    getattr(sys.modules[self.name], function),
+                )
+
+        except ModuleNotFoundError:
+            self.jit_load()
+
+            importlib.import_module(self.name)
+
+            for function in self.functions:
+                setattr(
+                    self,
+                    function + self._suffix,
+                    getattr(sys.modules[self.name], function),
+                )
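[Reviewer note] No build-script change is included in this patch, so the following is only a sketch of how `get_prebuild_extensions()` could be consumed: hand the returned `CUDAExtension` objects to setuptools with torch's `BuildExtension`, so that the `_CUDA`-suffixed modules `TorchOpLoader.load_mod` tries to import first actually ship with the installed package. The file name and setup arguments are assumptions, not part of this diff:

```python
# Hypothetical setup.py fragment.
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension

from tmol.utility.cpp_extension import get_prebuild_extensions

setup(
    name="tmol",
    packages=find_packages(),
    ext_modules=get_prebuild_extensions(),  # one CUDAExtension per compiled shim
    cmdclass={"build_ext": BuildExtension},
)
```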