Skip to content

Commit 3a02de1

Browse files
committed
Fixed Wrench Projectors, Ad Operators.
1 parent b7daadf commit 3a02de1

10 files changed

Lines changed: 90 additions & 96 deletions

File tree

.gitignore

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
**/__pycache__
22
dist
3-
43
uv.lock
5-
6-
.vscode/
4+
.vscode/
5+
venv/
6+
.mypy_cache/

README.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,8 @@
1+
# PyMatLie
2+
3+
![workflow](https://github.com/luis-marques/pymatlie/actions/workflows/ci.yml/badge.svg)
4+
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
5+
16
## Installation
27
Either install via `pip` through
38
```
@@ -7,7 +12,7 @@ or, for development, clone the repository and install in editable mode using
712
```
813
$ python -m venv venv
914
$ source venv/bin/activate
10-
$ pip install -e .
15+
$ pip install -e ".[dev]"
1116
```
1217

1318
## Usage

examples/.gitkeep

Whitespace-only changes.

pyproject.toml

Lines changed: 15 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -5,33 +5,31 @@ build-backend = "hatchling.build"
55

66
[project]
77
name = "pymatlie"
8-
version = "0.0.1a0"
9-
authors = [
10-
{ name="Luis Marques", email="lmarques@umich.edu" },
11-
]
8+
version = "0.0.1a1"
9+
authors = [{ name = "Luis Marques", email = "lmarques@umich.edu" }]
1210
description = "pymatlie provides an implementation of Matrix Lie Groups (MLG) used in robotics."
1311
readme = "README.md"
1412
requires-python = ">=3.9"
1513
classifiers = [
16-
"Programming Language :: Python :: 3",
17-
"Operating System :: OS Independent",
14+
"Programming Language :: Python :: 3",
15+
"Operating System :: OS Independent",
1816
]
1917
license = "MIT"
2018
license-files = ["LICEN[CS]E*"]
21-
dependencies = [
22-
"torch>=2.1",
23-
]
19+
dependencies = ["torch>=2.1"]
2420

2521

2622
[project.optional-dependencies]
2723
dev = [
28-
"black",
29-
"docformatter",
30-
"isort",
31-
"mypy",
32-
"pylint>=2.14.5",
33-
"pytest-pylint>=0.18.0",
34-
"pytest>=7.2.2",
24+
"black",
25+
"docformatter",
26+
"isort",
27+
"mypy",
28+
"pylint>=2.14.5",
29+
"pytest-pylint>=0.18.0",
30+
"pytest>=7.2.2",
31+
"pyyaml",
32+
"lark",
3533
]
3634

3735
[project.urls]
@@ -57,4 +55,4 @@ exclude = ["venv/*"]
5755

5856
[[tool.mypy.overrides]]
5957
module = ["matplotlib.*"]
60-
ignore_missing_imports = true
58+
ignore_missing_imports = true

src/pymatlie/base_group.py

Lines changed: 30 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
from abc import ABC, abstractmethod
44
from dataclasses import dataclass, field
5-
from typing import Optional, Tuple
5+
from typing import Tuple
66

77
import torch
88

@@ -44,7 +44,7 @@ def __post_init__(self):
4444
assert self.B.shape == (self.g_dim, self.u_dim), f"dynamics_step: B must be ({self.g_dim}, {self.u_dim}), got {self.B.shape}"
4545
object.__setattr__(self, "B_T", self.B.T)
4646

47-
def project_to_motion_constraints(self, g: torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
47+
def project_to_motion_constraints(self, _g: torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
4848
"""Project the state to the motion constraints."""
4949
return xi
5050

@@ -189,8 +189,9 @@ def right_minus(cls, g_start: torch.Tensor, g_end: torch.Tensor) -> torch.Tensor
189189
def right_invariant_error(cls, estimated_state: torch.Tensor, true_state: torch.Tensor) -> torch.Tensor:
190190
"""Computes the right invariant error between the estimated state and
191191
the true state."""
192-
assert estimated_state.shape == true_state.shape, f"right_invariant_error: mismatched shapes {estimated_state.shape} vs {true_state.shape}"
193-
return estimated_state @ cls.inverse(true_state)
192+
# assert estimated_state.shape == true_state.shape, f"right_invariant_error: mismatched shapes {estimated_state.shape} vs {true_state.shape}"
193+
# return estimated_state @ cls.inverse(true_state)
194+
return true_state @ cls.inverse(estimated_state)
194195

195196
@classmethod
196197
def left_invariant_error(cls, estimated_state: torch.Tensor, true_state: torch.Tensor) -> torch.Tensor:
@@ -199,7 +200,7 @@ def left_invariant_error(cls, estimated_state: torch.Tensor, true_state: torch.T
199200
assert estimated_state.shape == true_state.shape, f"right_invariant_error: mismatched shapes {estimated_state.shape} vs {true_state.shape}"
200201
return cls.inverse(true_state) @ estimated_state
201202

202-
def f(self, g: torch.Tensor, xi: torch.Tensor, u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
203+
def f(self, _g: torch.Tensor, xi: torch.Tensor, u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
203204
"""Update using Euler-Poincare equations."""
204205
D = self.g_dim
205206
assert xi.ndim == 2 and xi.shape[1] == D, f"xi must be (N, {D}), got {xi.shape}"
@@ -232,36 +233,44 @@ def update_configuration(self, g: torch.Tensor, xi: torch.Tensor, dt: float) ->
232233

233234
def update_velocity(self, xi: torch.Tensor, dxi: torch.Tensor, dt: float) -> torch.Tensor:
234235
"""Updates the velocity (Lie algebra element xi) using the Lie algebra
235-
element dxi.
236-
"""
236+
element dxi."""
237237
assert xi.ndim == 2 and xi.shape[-1] == self.g_dim, f"update_velocity: xi must be (N, {self.g_dim}), got {xi.shape}"
238238
assert dxi.ndim == 2 and dxi.shape[-1] == self.g_dim, f"update_velocity: dxi must be (N, {self.g_dim}), got {dxi.shape}"
239239
return xi + dxi * dt
240240

241+
@staticmethod
241242
@abstractmethod
242243
def map_q_to_configuration(q: torch.Tensor) -> torch.Tensor:
243244
"""Map the configuration vector to the Lie group element."""
244245
raise NotImplementedError
245246

247+
@staticmethod
246248
@abstractmethod
247249
def map_configuration_to_q(g: torch.Tensor) -> torch.Tensor:
248250
"""Map the Lie Group element to configuration space."""
249251
raise NotImplementedError
250252

253+
@staticmethod
251254
@abstractmethod
252255
def map_dq_to_velocity(q: torch.Tensor, dq: torch.Tensor) -> torch.Tensor:
253-
""" Map the velocity in configuration space to the Lie Algebra velocity."""
256+
"""Map the velocity in configuration space to the Lie Algebra
257+
velocity."""
254258
raise NotImplementedError
255259

260+
@staticmethod
256261
@abstractmethod
257262
def map_velocity_to_dq(q: torch.Tensor, velocity: torch.Tensor) -> torch.Tensor:
258-
""" Map the velocity in Lie Algebra to the configuration space velocity."""
263+
"""Map the velocity in Lie Algebra to the configuration space
264+
velocity."""
259265
raise NotImplementedError
260266

267+
268+
@dataclass(frozen=True)
261269
class NonholonomicGroup(MatrixLieGroup):
262270
"""Base class for nonholonomic matrix Lie groups."""
263271

264-
constraint_projection_matrix: Optional[torch.Tensor] = field(init=False, repr=False) # Enforces A @ xi = 0 constraint
272+
constraint_projection_matrix_velocity: torch.Tensor = field(init=False, repr=False) # Enforces A @ xi = 0 constraint
273+
constraint_projection_matrix_wrench: torch.Tensor = field(init=False, repr=False)  # Projects applied wrenches consistently with the A @ xi = 0 constraint — TODO confirm intended wording (comment truncated in original)
265274

266275
def __init__(self, *args, **kwargs):
267276
super().__init__(*args, **kwargs)
@@ -276,25 +285,29 @@ def __init__(self, *args, **kwargs):
276285
P = self.inertia_matrix_inv @ A_matrix.T @ lambda_solver @ A_matrix # inertia_matrix^-1 @ A^T @ (A @ inertia_matrix^-1 @ A^T)^-1 @ A
277286
I_minus_P = torch.eye(self.g_dim, device=self.inertia_matrix.device) - P
278287

279-
object.__setattr__(self, "constraint_projection_matrix", I_minus_P.T)
288+
PI = (
289+
torch.eye(self.g_dim, device=self.inertia_matrix.device)
290+
- A_matrix.T @ torch.linalg.inv(A_matrix @ self.inertia_matrix_inv @ A_matrix.T) @ A_matrix @ self.inertia_matrix_inv
291+
)
292+
293+
object.__setattr__(self, "constraint_projection_matrix_velocity", I_minus_P)  # Stored un-transposed; the transpose is applied at the use site for batch multiplication
294+
object.__setattr__(self, "constraint_projection_matrix_wrench", PI)
280295

281-
def project_to_motion_constraints(self, g: torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
296+
def project_to_motion_constraints(self, _g: torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
282297
"""Project the state to the motion constraints."""
283-
return xi @ self.constraint_projection_matrix
298+
return xi @ self.constraint_projection_matrix_velocity.T
284299

285300
@abstractmethod
286-
def get_Pfaffian_A(self, g:torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
301+
def get_Pfaffian_A(self, g: torch.Tensor, xi: torch.Tensor) -> torch.Tensor:
287302
"""Computes the Pfaffian A of the nonholonomic group."""
288303
raise NotImplementedError
289-
# TODO: can this be made a property on subclass if they are constant???
290304

291305
def f(self, g: torch.Tensor, xi: torch.Tensor, u: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
292306
"""Update using Euler-Poincare equations."""
293307
# _, xi_dot = super().f(g, xi, u)
294308
D = self.g_dim
295309
assert xi.ndim == 2 and xi.shape[1] == D, f"xi must be (N, {D}), got {xi.shape}"
296310
assert u.shape[1] == self.u_dim and u.ndim == 2, f"u must be (N, {self.u_dim}), got {u.shape}"
297-
# TODO: clean this and holonomic to use same interface as state-space dynamics (i.e. get forces, ...)
298311
Bu = u @ self.B_T # Bu in batched form
299312

300313
coad = self.coadjoint_operator(xi) # (N, D, D)
@@ -303,4 +316,4 @@ def f(self, g: torch.Tensor, xi: torch.Tensor, u: torch.Tensor) -> Tuple[torch.T
303316
xi_dot = (self.inertia_matrix_inv @ rhs.T).T
304317
xi_dot_proj = self.project_to_motion_constraints(g, xi_dot)
305318

306-
return xi, xi_dot_proj
319+
return xi, xi_dot_proj

src/pymatlie/se2.py

Lines changed: 20 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from typing import Tuple
55

66
import torch
7+
78
from pymatlie.base_group import MatrixLieGroup
89
from pymatlie.so2 import SO2
910

@@ -53,7 +54,6 @@ def logm(g: torch.Tensor) -> torch.Tensor:
5354
tau_hat[..., :2, 2] = rho.squeeze(-1) # Set the translation part
5455
return tau_hat
5556

56-
#TODO: change back to this
5757
# # @torch.compile
5858
# @staticmethod
5959
# def exp(tau: torch.Tensor) -> torch.Tensor:
@@ -93,29 +93,28 @@ def left_jacobian(cls, tau: torch.Tensor) -> torch.Tensor:
9393
jac[..., 1, 2] = (theta * y - x + x * torch.cos(theta) - y * torch.sin(theta)) / (theta**2 + 1e-15)
9494
return jac
9595

96-
# TODO: check this
9796
@classmethod
9897
def left_jacobian_inverse(cls, tau: torch.Tensor) -> torch.Tensor:
99-
"""
100-
Analytic inverse of the left‐Jacobian J(τ) for SE(2).
101-
tau is shape (B,3) = [ρ_x, ρ_y, φ].
102-
Returns J(τ)^{-1}, shape (B,3,3).
98+
"""Analytic inverse of the left‐Jacobian J(τ) for SE(2).
99+
100+
tau is shape (B,3) = [ρ_x, ρ_y, φ]. Returns J(τ)^{-1}, shape
101+
(B,3,3).
103102
"""
104103
assert tau.ndim == 2 and tau.shape[1] == 3
105104
B = tau.shape[0]
106-
φ = tau[:, 2] # (B,)
105+
phi = tau[:, 2] # (B,)
107106
# 1) invert the 2×2 SO(2) left‐Jacobian on φ
108-
J2_inv = SO2.left_jacobian_inverse(φ.unsqueeze(-1)) # (B,2,2)
107+
J2_inv = SO2.left_jacobian_inverse(phi.unsqueeze(-1)) # (B,2,2)
109108

110109
# 2) grab the translation terms from the existing left_jacobian
111-
full_J = cls.left_jacobian(tau) # (B,3,3)
112-
trans = full_J[:, :2, 2] # (B,2)
110+
full_J = cls.left_jacobian(tau) # (B,3,3)
111+
trans = full_J[:, :2, 2] # (B,2)
113112

114113
# 3) build the inverse: block‐upper triangular
115114
inv = torch.zeros((B, 3, 3), device=tau.device, dtype=tau.dtype)
116115
inv[:, :2, :2] = J2_inv
117-
inv[:, :2, 2] = -(J2_inv @ trans.unsqueeze(-1)).squeeze(-1)
118-
inv[:, 2, 2] = 1.0
116+
inv[:, :2, 2] = -(J2_inv @ trans.unsqueeze(-1)).squeeze(-1)
117+
inv[:, 2, 2] = 1.0
119118
return inv
120119

121120
@staticmethod
@@ -161,18 +160,18 @@ def compute_twist_map_inverse(q: torch.Tensor) -> torch.Tensor:
161160
"""Compute J⁻¹(q)=J(q)^T mapping ξ→q̇ without any pinv."""
162161
J = SE2.compute_twist_map(q)
163162
return J.transpose(-2, -1)
164-
163+
165164
@staticmethod
166165
def map_q_to_configuration(q: torch.Tensor) -> torch.Tensor:
167-
assert q.ndim == 2 and q.shape[-1] == SE2.g_dim, f"euclidean_vector_to_g requires shape (N, {SE2.g_dim}), got {q.shape}"
166+
assert q.ndim == 2 and q.shape[-1] == SE2.g_dim, f"map_q_to_configuration requires shape (N, {SE2.g_dim}), got {q.shape}"
168167
g = torch.eye(3, device=q.device).repeat(*q.shape[:-1], 1, 1)
169168
g[..., 0:2, 2] = q[..., :2]
170169
g[..., :2, :2] = SO2.exp(q[..., 2].unsqueeze(-1))
171170
return g
172-
171+
173172
@staticmethod
174173
def map_configuration_to_q(g: torch.Tensor) -> torch.Tensor:
175-
assert g.ndim == 3 and g.shape[-2:] == SE2.matrix_size, f"g_to_euclidean_vector requires shape (N, {SE2.matrix_size}), got {g.shape}"
174+
assert g.ndim == 3 and g.shape[-2:] == SE2.matrix_size, f"map_configuration_to_q requires shape (N, {SE2.matrix_size}), got {g.shape}"
176175
theta = torch.atan2(g[..., 1, 0], g[..., 0, 0])
177176
x = g[..., 0, 2] # (N,)
178177
y = g[..., 1, 2] # (N,)
@@ -186,12 +185,12 @@ def map_dq_to_velocity(q: torch.Tensor, dq: torch.Tensor) -> torch.Tensor:
186185
xi = torch.bmm(J_twist, dq.unsqueeze(-1)).squeeze(-1) # (N, g_dim)
187186
# assert xi.shape == (dq.shape[0], self.g_dim), f"xi shape mismatch: expected {(dq.shape[0], self.g_dim)}, got {xi.shape}"
188187
return xi
189-
188+
190189
@staticmethod
191-
def map_velocity_to_dq(q: torch.Tensor, vel: torch.Tensor) -> torch.Tensor:
190+
def map_velocity_to_dq(q: torch.Tensor, velocity: torch.Tensor) -> torch.Tensor:
192191
assert q.ndim == 2 and q.shape[1] == SE2.g_dim, f"map_velocity_to_dq requires shape (N, {SE2.g_dim}), got {q.shape}"
193-
assert vel.ndim == 2 and vel.shape[1] == SE2.g_dim, f"velocity_to_dq requires shape (N, {SE2.g_dim}), got {vel.shape}"
192+
assert velocity.ndim == 2 and velocity.shape[1] == SE2.g_dim, f"velocity_to_dq requires shape (N, {SE2.g_dim}), got {velocity.shape}"
194193
J_twist_inv = SE2.compute_twist_map_inverse(q)
195-
dq = torch.bmm(J_twist_inv, vel.unsqueeze(-1)).squeeze(-1)
194+
dq = torch.bmm(J_twist_inv, velocity.unsqueeze(-1)).squeeze(-1)
196195
# assert dq.shape == (vel.shape[0], self.g_dim), f"dq shape mismatch: expected {(N, self.g_dim)}, got {dq.shape}"
197-
return dq
196+
return dq

src/pymatlie/so2.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
"""SO(2) Lie group implementation."""
22

33
import torch
4+
45
from pymatlie.base_group import MatrixLieGroup
5-
from pymatlie.vecops import sincu, versine_over_x, sinc_taylor, versine_over_x_taylor
6+
from pymatlie.vecops import sincu, versine_over_x
7+
68

79
class SO2(MatrixLieGroup):
810
"""SO(2) Lie group implementation (batch-only)."""
@@ -67,17 +69,16 @@ def left_jacobian(tau: torch.Tensor) -> torch.Tensor:
6769
tau = tau[..., 0] # (N,)
6870
I = torch.eye(2, dtype=tau.dtype, device=tau.device) # (2, 2)
6971
skew = SO2.skew_sym().to(tau.device).to(tau.dtype) # (2, 2)
70-
# TODO: compare with torch.sincu and torch.versine_over_x (from vecops...)
71-
return sinc_taylor(tau)[..., None, None] * I + versine_over_x_taylor(tau)[..., None, None] * skew
72+
return sincu(tau)[..., None, None] * I + versine_over_x(tau)[..., None, None] * skew
7273

7374
@staticmethod
7475
def left_jacobian_inverse(tau: torch.Tensor) -> torch.Tensor:
7576
"""Inverse of left Jacobian of SO(2), input shape (N, 1), output (N, 2,
7677
2)."""
7778
assert tau.ndim == 2 and tau.shape[-1] == SO2.g_dim, "left_jacobian_inverse requires shape (N, 1)"
7879
tau = tau[..., 0]
79-
A = sinc_taylor(tau) # (N,)
80-
B = versine_over_x_taylor(tau) # (N,)
80+
A = sincu(tau) # (N,)
81+
B = versine_over_x(tau) # (N,)
8182
denom = A**2 + B**2 # (N,)
8283

8384
row0 = torch.stack([A, B], dim=-1) # (N, 2)
@@ -91,13 +92,13 @@ def adjoint_matrix(g: torch.Tensor) -> torch.Tensor:
9192
return torch.eye(2, dtype=g.dtype, device=g.device).repeat(g.shape[0], 1, 1)
9293

9394
@staticmethod
94-
def g_to_euclidean_vector(g: torch.Tensor) -> torch.Tensor:
95+
def map_configuration_to_q(g: torch.Tensor) -> torch.Tensor:
9596
"""Converts a Lie group element (SO(2) matrix) to a Lie algebra
9697
element."""
9798
return SO2.log(g)
9899

99100
@staticmethod
100-
def euclidean_vector_to_g(x):
101+
def map_q_to_configuration(q: torch.Tensor) -> torch.Tensor:
101102
"""Converts a Lie algebra element (SO(2) vector) to a Lie group
102103
element."""
103-
return SO2.exp(x)
104+
return SO2.exp(q)

src/pymatlie/vecops.py

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -14,23 +14,3 @@ def versine_over_x(x: torch.Tensor) -> torch.Tensor:
1414
versine_over_x(x) = (1 - cos(x)) / x
1515
"""
1616
return x.new_tensor(0.5) * x * torch.square(sincu(x / x.new_tensor(2.0)))
17-
18-
19-
def sinc_taylor(phi, eps=1e-4):
20-
# phi: (...,) tensor
21-
small = phi.abs() < eps
22-
out = torch.empty_like(phi)
23-
# far from zero: use standard
24-
out[~small] = torch.sin(phi[~small]) / phi[~small]
25-
# near zero: 1 - φ²/6 + φ⁴/120
26-
x = phi[small]
27-
out[small] = 1 - x**2/6 + x**4/120
28-
return out
29-
30-
def versine_over_x_taylor(phi, eps=1e-4):
31-
small = phi.abs() < eps
32-
out = torch.empty_like(phi)
33-
out[~small] = (1 - torch.cos(phi[~small])) / phi[~small]
34-
x = phi[small]
35-
out[small] = x/2 - x**3/24 + x**5/720
36-
return out

0 commit comments

Comments (0)