import pytorch3d.loss
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.ops import cot_laplacian
from pytorch3d.structures import Meshes
from human_body_prior.tools import tgm_conversion as tgm
import chamfer_distance as chd
def point2point_signed(
x,
y,
x_normals=None,
y_normals=None,
return_vector=False,
):
"""
    Signed distance between two point clouds.

    Args:
        x: FloatTensor of shape (N, P1, D) representing a batch of point clouds
            with P1 points in each batch element, batch size N and feature
            dimension D.
        y: FloatTensor of shape (N, P2, D) representing a batch of point clouds
            with P2 points in each batch element, batch size N and feature
            dimension D.
        x_normals: Optional FloatTensor of shape (N, P1, D) with the normals of x.
        y_normals: Optional FloatTensor of shape (N, P2, D) with the normals of y.
        return_vector: if True, also return the raw offset vectors y2x and x2y.

    Returns:
        - y2x_signed: torch.Tensor of shape (N, P2)
            signed distance from each point in y to its closest point in x
            (unsigned if x_normals is None)
        - x2y_signed: torch.Tensor of shape (N, P1)
            signed distance from each point in x to its closest point in y
            (unsigned if y_normals is None)
        - yidx_near: torch.Tensor
            indices of the x points closest to each point in y
        - xidx_near: torch.Tensor
            indices of the y points closest to each point in x
        - y2x, x2y: torch.Tensor
            the corresponding offset vectors (only when return_vector=True)
"""
N, P1, D = x.shape
P2 = y.shape[1]
if y.shape[0] != N or y.shape[2] != D:
raise ValueError("y does not have the correct shape.")
ch_dist = chd.ChamferDistance()
x_near, y_near, xidx_near, yidx_near = ch_dist(x,y,x_normals=x_normals,y_normals=y_normals)
xidx_near_expanded = xidx_near.view(N, P1, 1).expand(N, P1, D).to(torch.long)
x_near = y.gather(1, xidx_near_expanded)
yidx_near_expanded = yidx_near.view(N, P2, 1).expand(N, P2, D).to(torch.long)
y_near = x.gather(1, yidx_near_expanded)
    x2y = x - x_near  # offset from the closest point in y to each point in x
    y2x = y - y_near  # offset from the closest point in x to each point in y
if x_normals is not None:
y_nn = x_normals.gather(1, yidx_near_expanded)
in_out = torch.bmm(y_nn.view(-1, 1, 3), y2x.view(-1, 3, 1)).view(N, -1).sign()
y2x_signed = y2x.norm(dim=2) * in_out
else:
y2x_signed = y2x.norm(dim=2)
if y_normals is not None:
x_nn = y_normals.gather(1, xidx_near_expanded)
in_out_x = torch.bmm(x_nn.view(-1, 1, 3), x2y.view(-1, 3, 1)).view(N, -1).sign()
x2y_signed = x2y.norm(dim=2) * in_out_x
else:
x2y_signed = x2y.norm(dim=2)
if not return_vector:
return y2x_signed, x2y_signed, yidx_near, xidx_near
else:
return y2x_signed, x2y_signed, yidx_near, xidx_near, y2x, x2y
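
# Usage sketch for point2point_signed (illustrative only; the shapes and random
# tensors below are assumptions, not values used elsewhere in this repo):
#
#   src = torch.rand(2, 1024, 3)                          # (N, P1, 3) query cloud
#   tgt = torch.rand(2, 2048, 3)                          # (N, P2, 3) reference cloud
#   tgt_normals = F.normalize(torch.rand(2, 2048, 3), dim=-1)
#   tgt2src, src2tgt, _, _ = point2point_signed(src, tgt, y_normals=tgt_normals)
#   # src2tgt carries a sign (negative "inside" if the normals point outward)
#   # because tgt normals were given; tgt2src stays unsigned since src normals
#   # were omitted.
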
def aa2matrot(pose):
    '''
    :param pose: Nx3 axis-angle (rotation vector) representation
    :return: pose_matrot: Nx3x3 rotation matrices
    '''
    pose_body_matrot = tgm.angle_axis_to_rotation_matrix(pose)[:, :3, :3].contiguous()
    return pose_body_matrot
def rotvec_to_rotmat(rotvec):
rotmat = aa2matrot(rotvec.contiguous().view(-1, 3)).view(-1, 3, 3)
return rotmat
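
# Usage sketch for the rotation helpers (illustrative; the 21-joint, flattened
# axis-angle body pose below is an assumption):
#
#   body_pose = torch.zeros(1, 21 * 3)      # axis-angle, flattened per frame
#   rotmats = rotvec_to_rotmat(body_pose)   # -> (21, 3, 3); identity for zeros
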
def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
r"""
Computes the laplacian smoothing objective for a batch of meshes.
This function supports three variants of Laplacian smoothing,
    namely with uniform weights ("uniform"), with cotangent weights ("cot"),
    and cotangent curvature ("cotcurv"). For more details read [1, 2].
Args:
meshes: Meshes object with a batch of meshes.
method: str specifying the method for the laplacian.
    Returns:
        curvature: Per-vertex norm of the (weighted) Laplacian, a tensor of
        shape (sum(V_n),); callers are expected to reduce it (e.g. with .mean()).
        Returns a zero tensor if meshes contains no meshes or all empty meshes.
Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3.
The Laplacian matrix L is a NxN tensor such that LV gives a tensor of vectors:
for a uniform Laplacian, LuV[i] points to the centroid of its neighboring
vertices, a cotangent Laplacian LcV[i] is known to be an approximation of
the surface normal, while the curvature variant LckV[i] scales the normals
by the discrete mean curvature. For vertex i, assume S[i] is the set of
neighboring vertices to i, a_ij and b_ij are the "outside" angles in the
two triangles connecting vertex v_i and its neighboring vertex v_j
for j in S[i], as seen in the diagram below.
    .. code-block:: python

               a_ij
                /\
               /  \
              /    \
             /      \
        v_i /________\ v_j
            \        /
             \      /
              \    /
               \  /
                \/
               b_ij
The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i)
For the uniform variant, w_ij = 1 / |S[i]|
For the cotangent variant,
w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik)
For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i])
where A[i] is the sum of the areas of all triangles containing vertex v_i.
There is a nice trigonometry identity to compute cotangents. Consider a triangle
with side lengths A, B, C and angles a, b, c.
    .. code-block:: python

                   c
                  /|\
                 / | \
                /  |  \
             B /  H|   \ A
              /    |    \
             /     |     \
            /a_____|_____b\
                   C
    Then cot a = (B^2 + C^2 - A^2) / (4 * area)
    We know that area = CH/2, and by the law of cosines we have
    A^2 = B^2 + C^2 - 2BC cos a  =>  B^2 + C^2 - A^2 = 2BC cos a
    Putting these together, we get:

        B^2 + C^2 - A^2     2BC cos a
        _______________  =  _________  =  (B/H) cos a = cos a / sin a = cot a
            4 * area            2CH
[1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion
and curvature flow", SIGGRAPH 1999.
[2] Nealan et al, "Laplacian Mesh Optimization", Graphite 2006.
"""
if meshes.isempty():
return torch.tensor(
[0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
)
N = len(meshes)
verts_packed = meshes.verts_packed() # (sum(V_n), 3)
faces_packed = meshes.faces_packed() # (sum(F_n), 3)
num_verts_per_mesh = meshes.num_verts_per_mesh() # (N,)
verts_packed_idx = meshes.verts_packed_to_mesh_idx() # (sum(V_n),)
weights = num_verts_per_mesh.gather(0, verts_packed_idx) # (sum(V_n),)
weights = 1.0 / weights.float()
# We don't want to backprop through the computation of the Laplacian;
# just treat it as a magic constant matrix that is used to transform
# verts into normals
with torch.no_grad():
if method == "uniform":
L = meshes.laplacian_packed()
elif method in ["cot", "cotcurv"]:
L, inv_areas = cot_laplacian(verts_packed, faces_packed)
if method == "cot":
norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
idx = norm_w > 0
norm_w[idx] = 1.0 / norm_w[idx]
else:
L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
norm_w = 0.25 * inv_areas
else:
raise ValueError("Method should be one of {uniform, cot, cotcurv}")
if method == "uniform":
laplacian = L.mm(verts_packed)
elif method == "cot":
laplacian = L.mm(verts_packed) * norm_w - verts_packed
elif method == "cotcurv":
# pyre-fixme[61]: `norm_w` may not be initialized here.
laplacian = (L.mm(verts_packed) - L_sum * verts_packed) * norm_w
curvature = laplacian.norm(p=2, dim=1)
return curvature
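
# Usage sketch for mesh_laplacian_smoothing on a single tetrahedron (purely
# illustrative; note the function returns per-vertex values, not a scalar):
#
#   verts = torch.tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])
#   faces = torch.tensor([[[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]])
#   per_vertex = mesh_laplacian_smoothing(Meshes(verts=verts, faces=faces),
#                                         method="cotcurv")
#   loss = per_vertex.mean()
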
class LaplacianLoss(nn.Module):
    # faces: Fx3 (shared across the batch; expanded to BxFx3 in forward)
def __init__(self, faces):
super(LaplacianLoss, self).__init__()
self.faces = faces
self.criterion = nn.L1Loss(reduction='mean')
# x,y: BxVx3
def forward(self, x, y):
batch_size = x.shape[0]
mesh_x = Meshes(
verts=x,
faces=self.faces.unsqueeze(0).expand(batch_size, -1, -1)
)
        # mesh_y is built only for the (currently disabled) relative-curvature
        # variant commented out below; the active loss regularizes the curvature
        # of the predicted mesh alone, so y is effectively unused.
        mesh_y = Meshes(
            verts=y,
            faces=self.faces.unsqueeze(0).expand(batch_size, -1, -1)
        )
        curvature_x = mesh_laplacian_smoothing(mesh_x, method='cotcurv')
        # curvature_y = mesh_laplacian_smoothing(mesh_y, method='cotcurv')
        # loss = self.criterion(curvature_x, curvature_y)
        loss = curvature_x.mean()
return loss
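
# Usage sketch for LaplacianLoss (hypothetical names: `smpl_faces` is an (F, 3)
# LongTensor of mesh faces, `pred_verts`/`gt_verts` are (B, V, 3) tensors):
#
#   laplacian_loss = LaplacianLoss(smpl_faces)
#   loss = laplacian_loss(pred_verts, gt_verts)   # gt_verts is currently unused
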
class NormalConsistencyLoss(nn.Module):
    # faces: Fx3 (shared across the batch; expanded to BxFx3 in forward)
def __init__(self, faces):
super(NormalConsistencyLoss, self).__init__()
self.faces = faces
    # x: BxVx3
def forward(self, x):
batch_size = x.shape[0]
mesh_x = Meshes(
verts=x,
faces=self.faces.unsqueeze(0).expand(batch_size, -1, -1)
)
return pytorch3d.loss.mesh_normal_consistency(mesh_x)
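
# Usage sketch for NormalConsistencyLoss (same hypothetical `smpl_faces` and
# `pred_verts` as above):
#
#   nc_loss = NormalConsistencyLoss(smpl_faces)
#   loss = nc_loss(pred_verts)   # penalizes sharp folds between adjacent faces
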
# https://github.com/hongsukchoi/Pose2Mesh_RELEASE/blob/master/lib/core/loss.py
class NormalVectorLoss(nn.Module):
# face: Fx3
def __init__(self, face):
super(NormalVectorLoss, self).__init__()
self.face = face
def forward(self, coord_out, coord_gt):
face = torch.LongTensor(self.face).cuda()
v1_out = coord_out[:, face[:, 1], :] - coord_out[:, face[:, 0], :]
v1_out = F.normalize(v1_out, p=2, dim=2) # L2 normalize to make unit vector
v2_out = coord_out[:, face[:, 2], :] - coord_out[:, face[:, 0], :]
v2_out = F.normalize(v2_out, p=2, dim=2) # L2 normalize to make unit vector
v3_out = coord_out[:, face[:, 2], :] - coord_out[:, face[:, 1], :]
        v3_out = F.normalize(v3_out, p=2, dim=2) # L2 normalize to make unit vector
v1_gt = coord_gt[:, face[:, 1], :] - coord_gt[:, face[:, 0], :]
v1_gt = F.normalize(v1_gt, p=2, dim=2) # L2 normalize to make unit vector
v2_gt = coord_gt[:, face[:, 2], :] - coord_gt[:, face[:, 0], :]
v2_gt = F.normalize(v2_gt, p=2, dim=2) # L2 normalize to make unit vector
normal_gt = torch.cross(v1_gt, v2_gt, dim=2)
normal_gt = F.normalize(normal_gt, p=2, dim=2) # L2 normalize to make unit vector
cos1 = torch.abs(torch.sum(v1_out * normal_gt, 2, keepdim=True))
cos2 = torch.abs(torch.sum(v2_out * normal_gt, 2, keepdim=True))
cos3 = torch.abs(torch.sum(v3_out * normal_gt, 2, keepdim=True))
loss = torch.cat((cos1, cos2, cos3), 1)
return loss.mean()
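
# Usage sketch for NormalVectorLoss (hypothetical (F, 3) face array `faces_np`;
# inputs must live on the GPU because the face indices are moved with .cuda()):
#
#   nv_loss = NormalVectorLoss(faces_np)
#   loss = nv_loss(pred_verts.cuda(), gt_verts.cuda())
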
epsilon = 1e-16  # small constant to keep sqrt numerically stable at zero edge length
class EdgeLengthLoss(nn.Module):
def __init__(self, face, relative_length=False):
super(EdgeLengthLoss, self).__init__()
self.face = face
self.relative_length = relative_length
def forward(self, coord_out, coord_gt):
face = torch.LongTensor(self.face).cuda()
d1_out = torch.sqrt(epsilon +
torch.sum((coord_out[:, face[:, 0], :] - coord_out[:, face[:, 1], :]) ** 2, 2, keepdim=True))
d2_out = torch.sqrt(epsilon +
torch.sum((coord_out[:, face[:, 0], :] - coord_out[:, face[:, 2], :]) ** 2, 2, keepdim=True))
d3_out = torch.sqrt(epsilon +
torch.sum((coord_out[:, face[:, 1], :] - coord_out[:, face[:, 2], :]) ** 2, 2, keepdim=True))
d1_gt = torch.sqrt(epsilon + torch.sum((coord_gt[:, face[:, 0], :] - coord_gt[:, face[:, 1], :]) ** 2, 2, keepdim=True))
d2_gt = torch.sqrt(epsilon + torch.sum((coord_gt[:, face[:, 0], :] - coord_gt[:, face[:, 2], :]) ** 2, 2, keepdim=True))
d3_gt = torch.sqrt(epsilon + torch.sum((coord_gt[:, face[:, 1], :] - coord_gt[:, face[:, 2], :]) ** 2, 2, keepdim=True))
diff1 = torch.abs(d1_out - d1_gt)
diff2 = torch.abs(d2_out - d2_gt)
diff3 = torch.abs(d3_out - d3_gt)
if self.relative_length:
diff1 = diff1 / d1_gt
diff2 = diff2 / d2_gt
diff3 = diff3 / d3_gt
loss = torch.cat((diff1, diff2, diff3), 1)
return loss.mean()
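
# Usage sketch for EdgeLengthLoss (same hypothetical inputs as above;
# relative_length=True divides each edge-length error by the ground-truth length):
#
#   edge_loss = EdgeLengthLoss(faces_np, relative_length=True)
#   loss = edge_loss(pred_verts.cuda(), gt_verts.cuda())
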