Allow batch computations for jacobian with pytorch #75

Open · wants to merge 3 commits into base: main
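A rough sketch (not part of the PR) of the kind of call this change is meant to enable: batched inputs to the PyTorch backend. The entry point, constructor arguments, frame name, and expected output shape below are assumptions based on the adam README, not on this branch.

import torch
from adam.pytorch import KinDynComputations

joints_name_list = ["torso_pitch", "l_knee", "r_knee"]       # hypothetical joint list
kindyn = KinDynComputations("model.urdf", joints_name_list)  # assumed constructor signature

w_H_b = torch.eye(4).expand(32, 4, 4)        # batch of 32 base poses
q = torch.zeros(32, len(joints_name_list))   # batch of 32 joint configurations
J = kindyn.jacobian("l_sole", w_H_b, q)      # presumably (32, 6, 6 + NDoF)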
src/adam/core/rbd_algorithms.py: 42 changes (32 additions, 10 deletions)
@@ -167,16 +167,24 @@ def joints_jacobian(
chain = self.model.get_joints_chain(self.root_link, frame)
eye = self.math.factory.eye(4)
B_H_j = eye
J = self.math.factory.zeros(6, self.NDoF)
B_H_L = self.forward_kinematics(frame, eye, joint_positions)
L_H_B = self.math.homogeneous_inverse(B_H_L)
J_list = []
joint_J_dict = {}
for joint in chain:
q = joint_positions[joint.idx] if joint.idx is not None else 0.0
H_j = joint.homogeneous(q=q)
B_H_j = B_H_j @ H_j
L_H_j = L_H_B @ B_H_j
if joint.idx is not None:
J[:, joint.idx] = self.math.adjoint(L_H_j) @ joint.motion_subspace()
joint_J_dict[joint.idx] = self.math.adjoint(L_H_j) @ joint.motion_subspace()

for i in range(self.NDoF):
if i in joint_J_dict:
J_list.append(joint_J_dict[i])
else:
J_list.append(self.math.factory.zeros(6))
J = self.math.stack_list(J_list)
return J

def jacobian(
@@ -186,9 +194,7 @@ def jacobian(
J = self.joints_jacobian(frame, joint_positions)
B_H_L = self.forward_kinematics(frame, eye, joint_positions)
L_X_B = self.math.adjoint_inverse(B_H_L)
J_tot = self.math.factory.zeros(6, self.NDoF + 6)
J_tot[:6, :6] = L_X_B
J_tot[:, 6:] = J
J_tot = self.math.vcat_inputs(L_X_B, J)

if (
self.frame_velocity_representation
@@ -202,7 +208,8 @@
w_H_L = base_transform @ B_H_L
LI_X_L = self.math.adjoint_mixed(w_H_L)
X = self.math.factory.eye(6 + self.NDoF)
X[:6, :6] = self.math.adjoint_mixed_inverse(base_transform)
X = self.math.compose_blocks(self.math.adjoint_mixed_inverse(base_transform), self.math.factory.zeros(6, self.NDoF),
self.math.factory.zeros(self.NDoF, 6), self.math.factory.eye(self.NDoF))
J_tot = LI_X_L @ J_tot @ X
return J_tot
else:
@@ -281,8 +288,10 @@ def jacobian_dot(

v = self.math.adjoint(L_H_B) @ B_v_IB
a = self.math.adjoint_derivative(L_H_B, v) @ B_v_IB
J[:, :6] = self.math.adjoint(L_H_B)
J_dot[:, :6] = self.math.adjoint_derivative(L_H_B, v)
joint_J_list = []
joint_J_dict = {}
joint_J_dot_list = []
joint_J_dot_dict = {}
for joint in chain:
q = joint_positions[joint.idx] if joint.idx is not None else 0.0
q_dot = joint_velocities[joint.idx] if joint.idx is not None else 0.0
@@ -294,8 +303,21 @@
J_dot_j = self.math.adjoint_derivative(L_H_j, v) @ joint.motion_subspace()
a += J_dot_j * q_dot
if joint.idx is not None:
J[:, joint.idx + 6] = J_j
J_dot[:, joint.idx + 6] = J_dot_j
joint_J_dict[joint.idx] = J_j
joint_J_dot_dict[joint.idx] = J_dot_j

for i in range(self.NDoF):
if i in joint_J_dict:
joint_J_list.append(joint_J_dict[i])
joint_J_dot_list.append(joint_J_dot_dict[i])
else:
joint_J_list.append(self.math.factory.zeros(6))
joint_J_dot_list.append(self.math.factory.zeros(6))
joint_J = self.math.stack_list(joint_J_list)
joint_J_dot = self.math.stack_list(joint_J_dot_list)

J = self.math.vcat_inputs(self.math.adjoint(L_H_B), joint_J)
J_dot = self.math.vcat_inputs(self.math.adjoint_derivative(L_H_B, v), joint_J_dot)

if (
self.frame_velocity_representation
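A note on the joints_jacobian and jacobian_dot hunks above (not part of the diff): instead of allocating a zero Jacobian and assigning columns in place with J[:, joint.idx] = ..., the new code collects each joint's 6D column in a dict keyed by joint index and stacks the columns at the end. Below is a minimal, self-contained sketch of that collect-then-stack pattern with plain tensors; stacking along the last dimension so a leading batch dimension survives is a choice made here, and the column values are made up.

import torch

ndof, batch = 3, 4
cols = {0: torch.randn(batch, 6), 2: torch.randn(batch, 6)}            # joints that have an index
col_list = [cols.get(i, torch.zeros(batch, 6)) for i in range(ndof)]   # zero column otherwise
J = torch.stack(col_list, dim=-1)                                      # shape (4, 6, 3)
print(J.shape)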
src/adam/core/spatial_math.py: 60 changes (24 additions, 36 deletions)
@@ -239,12 +239,9 @@ def H_revolute_joint(
Returns:
npt.ArrayLike: Homogeneous transform
"""
T = self.factory.eye(4)
R = self.R_from_RPY(rpy) @ self.R_from_axis_angle(axis, q)
T[:3, :3] = R
T[0, 3] = xyz[0]
T[1, 3] = xyz[1]
T[2, 3] = xyz[2]
T = self.transform(xyz, R)

return T

def H_prismatic_joint(
@@ -460,10 +457,8 @@ def adjoint(self, H: npt.ArrayLike) -> npt.ArrayLike:
"""
R = H[:3, :3]
p = H[:3, 3]
X = self.factory.eye(6)
X[:3, :3] = R
X[3:6, 3:6] = R
X[:3, 3:6] = self.skew(p) @ R
X = self.compose_blocks(R, self.skew(p) @ R,
self.factory.zeros(3, 3), R)
return X

def adjoint_derivative(self, H: npt.ArrayLike, v: npt.ArrayLike) -> npt.ArrayLike:
@@ -479,10 +474,8 @@
p = H[:3, 3]
R_dot = self.skew(v[3:]) @ R
p_dot = v[:3] - self.skew(p) @ v[3:]
X = self.factory.zeros(6, 6)
X[:3, :3] = R_dot
X[3:6, 3:6] = R_dot
X[:3, 3:6] = self.skew(p_dot) @ R + self.skew(p) @ R_dot
X = self.compose_blocks(R_dot, self.skew(p_dot) @ R + self.skew(p) @ R_dot,
self.factory.zeros(3, 3), R_dot)
return X

def adjoint_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
@@ -494,10 +487,8 @@ def adjoint_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
"""
R = H[:3, :3]
p = H[:3, 3]
X = self.factory.eye(6)
X[:3, :3] = R.T
X[3:6, 3:6] = R.T
X[:3, 3:6] = -R.T @ self.skew(p)
X = self.compose_blocks(R.T, -R.T @ self.skew(p),
self.factory.zeros(3, 3), R.T)
return X

def adjoint_inverse_derivative(
@@ -514,10 +505,8 @@ def adjoint_inverse_derivative(
p = H[:3, 3]
R_dot = self.skew(v[3:]) @ R
p_dot = v[:3] - self.skew(p) @ v[3:]
X = self.factory.zeros(6, 6)
X[:3, :3] = R_dot.T
X[3:6, 3:6] = R_dot.T
X[:3, 3:6] = -R_dot.T @ self.skew(p) - R.T @ self.skew(p_dot)
X = self.compose_blocks(R_dot.T, -R_dot.T @ self.skew(p) - R.T @ self.skew(p_dot),
self.factory.zeros(3, 3), R_dot.T)
return X

def adjoint_mixed(self, H: npt.ArrayLike) -> npt.ArrayLike:
@@ -528,9 +517,9 @@ def adjoint_mixed(self, H: npt.ArrayLike) -> npt.ArrayLike:
npt.ArrayLike: adjoint matrix
"""
R = H[:3, :3]
X = self.factory.eye(6)
X[:3, :3] = R
X[3:6, 3:6] = R
R = H[:3, :3]
X = self.compose_blocks(R, self.factory.zeros(3,3),
self.factory.zeros(3,3), R)
return X

def adjoint_mixed_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
@@ -541,9 +530,8 @@ def adjoint_mixed_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
npt.ArrayLike: adjoint matrix
"""
R = H[:3, :3]
X = self.factory.eye(6)
X[:3, :3] = R.T
X[3:6, 3:6] = R.T
X = self.compose_blocks(R.T, self.factory.zeros(3,3),
self.factory.zeros(3,3), R.T)
return X

def adjoint_mixed_derivative(
@@ -558,9 +546,8 @@
"""
R = H[:3, :3]
R_dot = self.skew(v[3:]) @ R
X = self.factory.zeros(6, 6)
X[:3, :3] = R_dot
X[3:6, 3:6] = R_dot
X = self.compose_blocks(R_dot, self.factory.zeros(3,3),
self.factory.zeros(3,3), R_dot)
return X

def adjoint_mixed_inverse_derivative(
@@ -575,9 +562,12 @@
"""
R = H[:3, :3]
R_dot = self.skew(v[3:]) @ R
X = self.factory.zeros(6, 6)
X[:3, :3] = R_dot.T
X[3:6, 3:6] = R_dot.T
# X = self.factory.zeros(6, 6)
# X[:3, :3] = R_dot.T
# X[3:6, 3:6] = R_dot.T

X = self.compose_blocks(R_dot.T, self.factory.zeros(3,3),
self.factory.zeros(3,3), R_dot.T)
return X

def homogeneous_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
@@ -589,7 +579,5 @@ def homogeneous_inverse(self, H: npt.ArrayLike) -> npt.ArrayLike:
"""
R = H[:3, :3]
p = H[:3, 3]
T = self.factory.eye(4)
T[:3, :3] = R.T
T[:3, 3] = -R.T @ p
T = self.transform(-R.T @ p, R.T)
return T
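A note on the spatial_math.py hunks above (not part of the diff): every adjoint-style matrix is now assembled by handing four blocks to compose_blocks instead of writing slices into a preallocated identity or zero matrix. Below is a minimal sketch of what such a helper can look like with plain tensors, following the block layout of the adjoint() hunk, X = [[R, skew(p) R], [0, R]]; the helper and the numbers are illustrative only.

import torch

def compose_blocks(b00, b01, b10, b11):
    top = torch.cat([b00, b01], dim=-1)      # upper row: left and right blocks
    bottom = torch.cat([b10, b11], dim=-1)   # lower row: left and right blocks
    return torch.cat([top, bottom], dim=-2)  # stack the two rows into one matrix

R = torch.eye(3)
px, py, pz = 0.1, 0.0, 0.2
skew_p = torch.tensor([[0.0, -pz, py],
                       [pz, 0.0, -px],
                       [-py, px, 0.0]])
X = compose_blocks(R, skew_p @ R, torch.zeros(3, 3), R)   # 6x6 adjoint of the transform
print(X.shape)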
src/adam/pytorch/torch_like.py: 83 changes (79 additions, 4 deletions)
@@ -23,6 +23,7 @@ def __post_init__(self):
if self.array.dtype != torch.float64:
self.array = self.array.double()

# TODO: retry the index_put_ override; the no-len() error is usually due to the value already being a tensor
def __setitem__(self, idx, value: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""Overrides set item operator"""
if type(self) is type(value):
@@ -149,6 +150,9 @@ def array(x: ntp.ArrayLike) -> "TorchLike":
Returns:
TorchLike: vector wrapping x
"""

if isinstance(x, torch.Tensor):
return TorchLike(x)
return TorchLike(torch.tensor(x))


@@ -205,13 +209,17 @@ def skew(x: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
TorchLike: skew matrix from x
"""
if not isinstance(x, TorchLike):
x0, x1, x2 = x
return TorchLike(
torch.tensor([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
torch.tensor([[0, -x2, x1], [x2, 0, -x0], [-x1, x0, 0]])
)

x = x.array
return TorchLike(
torch.tensor([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
)
x0, x1, x2 = x.unbind(dim=-1)
skew_x = torch.stack([torch.tensor(0), -x2, x1,
x2, torch.tensor(0), -x0,
-x1, x0, torch.tensor(0)], dim=-1).reshape(x.shape[:-1] + (3, 3))
return TorchLike(skew_x)

@staticmethod
def vertcat(*x: ntp.ArrayLike) -> "TorchLike":
@@ -236,3 +244,70 @@ def horzcat(*x: ntp.ArrayLike) -> "TorchLike":
else:
v = torch.tensor(x)
return TorchLike(v)

@staticmethod
def transform(p: Union["TorchLike", ntp.ArrayLike], R: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""
Returns:
TorchLike: composition of 4x4 transformation matrix from translation vector and rotation matrix
"""
R = R.array

if not isinstance(p, TorchLike):
T = torch.cat([
torch.cat([R, torch.tensor([p]).T], dim=-1),
torch.tensor([[0, 0, 0, 1]])
], dim=-2)
return TorchLike(T)

p = p.array.unsqueeze(0)

T = torch.cat([
torch.cat([R, p.T], dim=-1),
torch.tensor([[0, 0, 0, 1]])
], dim=-2)
return TorchLike(T)

@staticmethod
def compose_blocks(B00: Union["TorchLike", ntp.ArrayLike],
B01: Union["TorchLike", ntp.ArrayLike],
B10: Union["TorchLike", ntp.ArrayLike],
B11: Union["TorchLike", ntp.ArrayLike],) -> "TorchLike":
"""
Returns:
TorchLike: put together 4 matrix blocks in a single matrix
"""
first_row = torch.cat([B00.array, B01.array], dim=-1)
second_row = torch.cat([B10.array, B11.array], dim=-1)

X = torch.cat([
first_row,
second_row
], dim=-2)

return TorchLike(X)

@staticmethod
def stack_list(tensor_list: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""
Returns:
TorchLike: take a list of tensors and stack them together
"""
if isinstance(tensor_list[0], TorchLike):
tensor_list = [t.array for t in tensor_list]

stacked_list = torch.stack(tensor_list, dim=1)

return TorchLike(stacked_list)

@staticmethod
def vcat_inputs(a: Union["TorchLike", ntp.ArrayLike], b: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""
Returns:
TorchLike: take the base and joint components of the Jacobian to build the full Jacobian matrix
"""
a = a.array
b = b.array
c = torch.cat([a, b], dim=-1)

return TorchLike(c)
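A note on the skew change above (not part of the diff): the new version unbinds the last axis and stacks the nine entries back into a (..., 3, 3) matrix, rather than building a nested Python list from indexed elements. Below is a self-contained sketch of that batched pattern; using torch.zeros_like for the zero entries is a choice made here, not something taken from the diff.

import torch

def batched_skew(x: torch.Tensor) -> torch.Tensor:
    # x: (..., 3) -> (..., 3, 3)
    x0, x1, x2 = x.unbind(dim=-1)
    zero = torch.zeros_like(x0)                 # keeps dtype, device and batch shape
    flat = torch.stack([zero, -x2, x1,
                        x2, zero, -x0,
                        -x1, x0, zero], dim=-1)
    return flat.reshape(x.shape[:-1] + (3, 3))

print(batched_skew(torch.randn(4, 3)).shape)    # torch.Size([4, 3, 3])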