Commit

Smpl relations.
gulvarol committed Dec 14, 2018
1 parent 8af8ae1 commit d1e6309
Showing 3 changed files with 346 additions and 0 deletions.
122 changes: 122 additions & 0 deletions datageneration/misc/smpl_relations/rotations.py
@@ -0,0 +1,122 @@
import numpy as np
import math


def rotvec2rotmat(rotvec):
    # Computes the rotation matrix through the Rodrigues formula, as in cv2.Rodrigues
    theta = np.linalg.norm(rotvec)
    r = (rotvec / theta).reshape(3) if theta > 0. else np.zeros(3)
    cost = np.cos(theta)
    # Cross-product (skew-symmetric) matrix of the unit axis r
    mat = np.asarray([[0, -r[2], r[1]],
                      [r[2], 0, -r[0]],
                      [-r[1], r[0], 0]])
    return cost * np.eye(3) + (1 - cost) * np.outer(r, r) + np.sin(theta) * mat
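# Illustrative round-trip check: for a generic rotation vector,
# rotmat2rotvec (defined below) should invert rotvec2rotmat:
#   >>> v = np.array([0.1, 0.2, 0.3])
#   >>> np.allclose(rotmat2rotvec(rotvec2rotmat(v)), v)
#   True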


def euler2rotmat(e):
    # Rotation matrix from Euler angles e = (ex, ey, ez), composed as R = Rz.Ry.Rx
    Rx = np.array(((1, 0, 0),
                   (0, math.cos(e[0]), -math.sin(e[0])),
                   (0, math.sin(e[0]), math.cos(e[0]))))
    Ry = np.array(((math.cos(e[1]), 0, math.sin(e[1])),
                   (0, 1, 0),
                   (-math.sin(e[1]), 0, math.cos(e[1]))))
    Rz = np.array(((math.cos(e[2]), -math.sin(e[2]), 0),
                   (math.sin(e[2]), math.cos(e[2]), 0),
                   (0, 0, 1)))
    return np.dot(Rz, np.dot(Ry, Rx))


def rotmat2euler(R):
    # Euler angles (ex, ey, ez) from a rotation matrix, inverting euler2rotmat
    # (R = Rz.Ry.Rx). Assumes no gimbal lock, i.e. sy not close to zero.
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    rex = math.atan2(R[2, 1], R[2, 2])
    rey = math.atan2(-R[2, 0], sy)
    rez = math.atan2(R[1, 0], R[0, 0])
    return np.array((rex, rey, rez))
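# Illustrative round trip (holds whenever |ey| < pi/2):
#   >>> e = np.array([0.1, -0.2, 0.3])
#   >>> np.allclose(rotmat2euler(euler2rotmat(e)), e)
#   True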


def normalize(v):
    # Returns v / ||v||; the zero vector is mapped to the x-axis unit vector
    v_mag = np.linalg.norm(v)
    if v_mag == 0:
        v = np.zeros(3)
        v[0] = 1
    else:
        v = v / v_mag
    return v


def rotmat2rotvec(R):
    # Convert rotation matrix representation to axis-angle (rotation vector) representation
    # R: rotation matrix, u: unit axis, theta: angle
    # https://en.wikipedia.org/wiki/Rotation_matrix#Conversion_from_and_to_axis.E2.80.93angle
    u = np.zeros(3)
    x = 0.5 * (R[0, 0] + R[1, 1] + R[2, 2] - 1)  # Tr(R) = 1 + 2*cos(theta)
    # Clamp to [-1, 1]; float error can push x slightly out of range (e.g. 1.0000000484288)
    x = max(x, -1)
    x = min(x, 1)
    theta = math.acos(x)

    if theta < 1e-4:  # avoid division by zero!
        print('theta ~= 0 %f' % theta)
        return u
    elif abs(theta - math.pi) < 1e-4:
        print('theta ~= pi %f' % theta)
        # Near pi, R + I ~= 2*u*u^T, so any nonzero column of R + I is
        # proportional to u; pick the column with the largest diagonal entry
        # (i.e. the largest |u_i|) for numerical stability.
        if R[0, 0] >= R[1, 1] and R[0, 0] >= R[2, 2]:
            u[0] = R[0, 0] + 1
            u[1] = R[1, 0]
            u[2] = R[2, 0]
        elif R[1, 1] >= R[2, 2]:
            u[0] = R[0, 1]
            u[1] = R[1, 1] + 1
            u[2] = R[2, 1]
        else:
            u[0] = R[0, 2]
            u[1] = R[1, 2]
            u[2] = R[2, 2] + 1
        u = normalize(u)
    else:
        # (R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1]) = 2*sin(theta)*u
        d = 1 / (2 * math.sin(theta))
        u[0] = d * (R[2, 1] - R[1, 2])
        u[1] = d * (R[0, 2] - R[2, 0])
        u[2] = d * (R[1, 0] - R[0, 1])
    return u * theta
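# Illustrative check of the theta ~= pi branch: a half-turn about y,
# R = diag(-1, 1, -1), maps back to the rotation vector (0, pi, 0)
# (the call also prints its theta ~= pi warning):
#   >>> np.allclose(rotmat2rotvec(np.diag([-1., 1., -1.])), [0, np.pi, 0])
#   True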


def makeRotationMatrix(R):
    # Make R a proper rotation matrix: project onto the nearest orthonormal
    # matrix (in the Frobenius norm) via SVD
    U, S, Vt = np.linalg.svd(R)
    R = np.dot(U, Vt)

    # remove reflection
    if np.linalg.det(R) < 0:
        Vt[2, :] *= -1
        R = np.dot(U, Vt)
    return R


def euler2rotvec(e):
    R = euler2rotmat(e)
    return rotmat2rotvec(R)


def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'::
        >>> angle_between((1, 0, 0), (0, 1, 0))
        1.5707963267948966
        >>> angle_between((1, 0, 0), (1, 0, 0))
        0.0
        >>> angle_between((1, 0, 0), (-1, 0, 0))
        3.141592653589793
    """
    v1_u = normalize(v1)
    v2_u = normalize(v2)
    return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))


def axisangle(v1, v2):
    # Rotation vector (axis * angle) that rotates v1 onto v2.
    # Note: for (anti-)parallel vectors the cross product vanishes and
    # normalize falls back to the x-axis.
    v1_u = normalize(v1)
    v2_u = normalize(v2)
    angle = angle_between(v1_u, v2_u)
    axis = normalize(np.cross(v1_u, v2_u))
    return axis * angle
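# Example: rotating x onto y is a quarter-turn about z:
#   >>> axisangle((1, 0, 0), (0, 1, 0))
#   array([0.        , 0.        , 1.57079633])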
Binary file not shown.
224 changes: 224 additions & 0 deletions datageneration/misc/smpl_relations/smpl_relations.py
@@ -0,0 +1,224 @@
# Example usage:
# python smpl_relations.py \
# --fileinfo /home/gvarol/datasets/SURREAL/data/cmu/train/run0/03_01/01_01_c0001_info.mat \
# --t_beg 0 --t_end 100

import argparse
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.io as sio
import sys

import rotations
SMPL_PATH = os.getenv('SMPL_PATH', '../../')
sys.path.append(SMPL_PATH)
from smpl_webuser.serialization import load_model


def joint_names():
    return ['hips',
            'leftUpLeg',
            'rightUpLeg',
            'spine',
            'leftLeg',
            'rightLeg',
            'spine1',
            'leftFoot',
            'rightFoot',
            'spine2',
            'leftToeBase',
            'rightToeBase',
            'neck',
            'leftShoulder',
            'rightShoulder',
            'head',
            'leftArm',
            'rightArm',
            'leftForeArm',
            'rightForeArm',
            'leftHand',
            'rightHand',
            'leftHandIndex1',
            'rightHandIndex1']


def draw_joints2D(joints2D, ax=None, kintree_table=None, with_text=True, color='g'):
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)

    # Draw one bone per kinematic-tree edge (parent j1 -> child j2)
    for i in range(1, kintree_table.shape[1]):
        j1 = kintree_table[0][i]
        j2 = kintree_table[1][i]
        ax.plot([joints2D[j1, 0], joints2D[j2, 0]],
                [joints2D[j1, 1], joints2D[j2, 1]],
                color=color, linestyle='-', linewidth=2, marker='o', markersize=5)
        if with_text:
            ax.text(joints2D[j2, 0],
                    joints2D[j2, 1],
                    s=joint_names()[j2],
                    color=color,
                    fontsize=8)


def rotateBody(RzBody, pelvisRotVec):
    # Compose the body's global z-rotation with the pelvis rotation, then
    # rotate 90 degrees about x (from SMPL's y-up frame to the z-up world)
    Rpelvis = rotations.rotvec2rotmat(pelvisRotVec)
    globRotMat = np.dot(RzBody, Rpelvis)
    # R90 = rotations.euler2rotmat(np.array((np.pi / 2, np.pi / 2, 0)))
    R90 = rotations.euler2rotmat(np.array((np.pi / 2, 0, 0)))
    globRotVec = rotations.rotmat2rotvec(np.dot(R90, globRotMat))
    # globRotVec = rotations.rotmat2rotvec(globRotMat)
    return globRotVec


# Returns the intrinsic camera matrix.
# Parameters are hard-coded since all SURREAL images use the same camera.
def get_intrinsic():
    # These are set in Blender (datageneration/main_part1.py)
    res_x_px = 320  # *scn.render.resolution_x
    res_y_px = 240  # *scn.render.resolution_y
    f_mm = 60  # *cam_ob.data.lens
    sensor_w_mm = 32  # *cam_ob.data.sensor_width
    sensor_h_mm = sensor_w_mm * res_y_px / res_x_px  # *cam_ob.data.sensor_height (function of others)

    scale = 1  # *scn.render.resolution_percentage/100
    skew = 0  # only use rectangular pixels
    pixel_aspect_ratio = 1

    # From similar triangles:
    # sensor_w_mm / res_x_px = f_mm / fx_px
    fx_px = f_mm * res_x_px * scale / sensor_w_mm
    fy_px = f_mm * res_y_px * scale * pixel_aspect_ratio / sensor_h_mm

    # Center of the image
    u = res_x_px * scale / 2
    v = res_y_px * scale / 2

    # Intrinsic camera matrix
    K = np.array([[fx_px, skew, u], [0, fy_px, v], [0, 0, 1]])
    return K
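# Plugging in the constants above: fx = 60 * 320 / 32 = 600 px,
# sensor_h = 32 * 240 / 320 = 24 mm, fy = 60 * 240 / 24 = 600 px,
# and (u, v) = (160, 120), so
#   K = [[600,   0, 160],
#        [  0, 600, 120],
#        [  0,   0,   1]]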


# Returns the extrinsic camera matrix.
# T : translation vector from Blender (*cam_ob.location)
# RT: extrinsic computer vision camera matrix
# Script based on https://blender.stackexchange.com/questions/38009/3x4-camera-matrix-from-blender-camera
def get_extrinsic(T):
    # Take the first 3 columns of the matrix_world in Blender and transpose.
    # This is hard-coded since all images in SURREAL use the same.
    R_world2bcam = np.array([[0, 0, 1], [0, -1, 0], [-1, 0, 0]]).transpose()
    # *cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']),
    #                                (0., -1, 0., -1.0),
    #                                (-1., 0., 0., 0.),
    #                                (0.0, 0.0, 0.0, 1.0)))

    # Convert camera location to translation vector used in coordinate changes
    T_world2bcam = -1 * np.dot(R_world2bcam, T)

    # Following is needed to convert Blender camera to computer vision camera
    R_bcam2cv = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])

    # Build the coordinate transform matrix from world to computer vision camera
    R_world2cv = np.dot(R_bcam2cv, R_world2bcam)
    T_world2cv = np.dot(R_bcam2cv, T_world2bcam)

    # Put into 3x4 matrix
    RT = np.concatenate([R_world2cv, T_world2cv], axis=1)
    return RT, R_world2cv, T_world2cv


def project_vertices(points, intrinsic, extrinsic):
    # points: (N, 3) world coordinates -> returns (N, 2) pixel coordinates
    homo_coords = np.concatenate([points, np.ones((points.shape[0], 1))], axis=1).transpose()
    proj_coords = np.dot(intrinsic, np.dot(extrinsic, homo_coords))
    proj_coords = proj_coords / proj_coords[2]
    proj_coords = proj_coords[:2].transpose()
    return proj_coords
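# Illustrative usage with a hypothetical camera location 8 units along x
# (the translation pattern (d, -1, 0) matches the matrix_world comment above):
# the world point (0, -1, 0) lies on the optical axis and projects to the
# image center given by get_intrinsic():
#   >>> K = get_intrinsic()
#   >>> RT, _, _ = get_extrinsic(np.array([[8.], [-1.], [0.]]))
#   >>> project_vertices(np.array([[0., -1., 0.]]), K, RT)
#   array([[160., 120.]])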


def get_frame(filevideo, t=0):
    cap = cv2.VideoCapture(filevideo)
    cap.set(cv2.CAP_PROP_POS_FRAMES, t)  # seek to frame t
    ret, frame = cap.read()
    cap.release()
    frame = frame[:, :, [2, 1, 0]]  # BGR -> RGB
    return frame


def main():
    parser = argparse.ArgumentParser(description='Demo to read SMPL vertices of the SURREAL dataset.')
    parser.add_argument('--fileinfo', type=str,
                        help='Path to the *_info.mat file')
    parser.add_argument('--t_beg', type=int, default=0,
                        help='Frame number (default 0)')
    parser.add_argument('--t_end', type=int, default=1,
                        help='Frame number (default 1)')
    args = parser.parse_args()
    print('fileinfo: {}'.format(args.fileinfo))
    print('t_beg: {}'.format(args.t_beg))
    print('t_end: {}'.format(args.t_end))

    info = sio.loadmat(args.fileinfo)

    # <========= LOAD SMPL MODEL BASED ON GENDER
    if info['gender'][0] == 0:  # female
        m = load_model(os.path.join(SMPL_PATH, 'models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'))
    elif info['gender'][0] == 1:  # male
        m = load_model(os.path.join(SMPL_PATH, 'models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'))
    # =========>

    root_pos = m.J_transformed.r[0]

    zrot = info['zrot']
    zrot = zrot[0][0]  # body rotation around z in radians
    RzBody = np.array(((math.cos(zrot), -math.sin(zrot), 0),
                       (math.sin(zrot), math.cos(zrot), 0),
                       (0, 0, 1)))
    intrinsic = get_intrinsic()
    extrinsic, R, T = get_extrinsic(info['camLoc'])

    plt.figure(figsize=(18, 10))
    for t in range(args.t_beg, args.t_end):
        joints2D = info['joints2D'][:, :, t].T
        joints3D = info['joints3D'][:, :, t].T
        pose = info['pose'][:, t]
        pose[0:3] = rotateBody(RzBody, pose[0:3])
        # Set model shape
        m.betas[:] = info['shape'][:, 0]
        # Set model pose
        m.pose[:] = pose
        # Set model translation
        m.trans[:] = joints3D[0] - root_pos

        smpl_vertices = m.r
        smpl_joints3D = m.J_transformed.r

        # Project 3D -> 2D
        proj_smpl_vertices = project_vertices(smpl_vertices, intrinsic, extrinsic)
        proj_smpl_joints3D = project_vertices(smpl_joints3D, intrinsic, extrinsic)
        proj_joints3D = project_vertices(joints3D, intrinsic, extrinsic)

        # Read frame of the video
        rgb = get_frame(args.fileinfo[:-9] + '.mp4', t)
        plt.clf()
        # Show 2D skeletons (note that left/right are swapped)
        ax1 = plt.subplot(1, 2, 1)
        plt.imshow(rgb)
        # joints2D released with the dataset (blue)
        draw_joints2D(joints2D, ax1, m.kintree_table, color='b')
        # Projection of the SMPL 3D joints (red)
        draw_joints2D(proj_smpl_joints3D, ax1, m.kintree_table, color='r')
        # Projection of the joints3D released with the dataset (green)
        draw_joints2D(proj_joints3D, ax1, m.kintree_table, color='g')
        # Show the projection of the SMPL vertices
        plt.subplot(1, 2, 2)
        plt.imshow(rgb)
        plt.scatter(proj_smpl_vertices[:, 0], proj_smpl_vertices[:, 1], 1)
        plt.pause(1)
    plt.show()


if __name__ == '__main__':
    main()
