From 43416c4717d2430c3e11f042294d12b781fee2e1 Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Wed, 27 Sep 2017 04:09:30 +0800
Subject: [PATCH] Failed lstm experiment

---
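
A minimal usage sketch for the softmax_temperature helper added below, assuming a
[batch, num_bins] logits tensor; the batch size, the 66-bin count, and the
soft-argmax step are illustrative and not taken from this diff:

    import torch
    from utils import softmax_temperature  # helper added to code/utils.py by this patch

    logits = torch.randn(8, 66)                   # hypothetical per-bin yaw scores
    probs = softmax_temperature(logits, 1.0)      # each row sums to 1; a larger
                                                  # temperature flattens the distribution
    bin_idx = torch.arange(0, 66).float()         # bin indices
    expected_bin = torch.sum(probs * bin_idx, 1)  # expected bin per sample, shape [batch]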
 code/utils.py |   62 ++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 26 deletions(-)
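
Unit handoff between two helpers kept in this file: get_ypr_from_mat returns angles
in radians, while plot_pose_cube expects degrees (it multiplies by np.pi / 180
internally). A small conversion sketch, with the .mat path and image file as
placeholders:

    import cv2
    import numpy as np
    from utils import get_ypr_from_mat, plot_pose_cube

    pitch, yaw, roll = get_ypr_from_mat('sample.mat')  # radians; placeholder path
    img = cv2.imread('sample_face.jpg')                # placeholder image
    img = plot_pose_cube(img,
                         yaw * 180.0 / np.pi,          # convert to degrees
                         pitch * 180.0 / np.pi,
                         roll * 180.0 / np.pi)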

diff --git a/code/utils.py b/code/utils.py
index 645ae19..96c447b 100644
--- a/code/utils.py
+++ b/code/utils.py
@@ -7,6 +7,40 @@
 import math
 from math import cos, sin
 
+def softmax_temperature(tensor, temperature):
+    result = torch.exp(tensor / temperature)  # temperature-scaled exponentials
+    result = torch.div(result, torch.sum(result, 1).unsqueeze(1).expand_as(result))  # normalize each row to sum to 1
+    return result
+
+def get_pose_params_from_mat(mat_path):
+    # This function gets the pose parameters from the .mat
+    # annotations that come with the Pose_300W_LP dataset.
+    mat = sio.loadmat(mat_path)
+    # [pitch yaw roll tdx tdy tdz scale_factor]
+    pre_pose_params = mat['Pose_Para'][0]
+    # Get [pitch, yaw, roll, tdx, tdy]
+    pose_params = pre_pose_params[:5]
+    return pose_params
+
+def get_ypr_from_mat(mat_path):
+    # Get yaw, pitch, roll from .mat annotation.
+    # They are in radians
+    mat = sio.loadmat(mat_path)
+    # [pitch yaw roll tdx tdy tdz scale_factor]
+    pre_pose_params = mat['Pose_Para'][0]
+    # Get [pitch, yaw, roll]
+    pose_params = pre_pose_params[:3]
+    return pose_params
+
+def get_pt2d_from_mat(mat_path):
+    # Get 2D landmarks
+    mat = sio.loadmat(mat_path)
+    pt2d = mat['pt2d']
+    return pt2d
+
+def mse_loss(input, target):
+    return torch.sum(torch.abs(input.data - target.data) ** 2)  # note: sum (not mean) of squared errors, computed on .data so no gradient flows
+
 def plot_pose_cube(img, yaw, pitch, roll, tdx=None, tdy=None, size=150.):
     # Input is a cv2 image
     # pose_params: (pitch, yaw, roll, tdx, tdy)
@@ -16,14 +50,13 @@
     p = pitch * np.pi / 180
     y = -(yaw * np.pi / 180)
     r = roll * np.pi / 180
-
     if tdx != None and tdy != None:
         face_x = tdx - 0.50 * size
         face_y = tdy - 0.50 * size
     else:
         height, width = img.shape[:2]
-        face_x = width / 2 - 0.15 - size
-        face_y = height / 2 - 0.15 - size
+        face_x = width / 2 - 0.5 * size
+        face_y = height / 2 - 0.5 * size
 
     x1 = size * (cos(y) * cos(r)) + face_x
     y1 = size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y)) + face_y
@@ -49,26 +82,3 @@
     cv2.line(img, (int(x3), int(y3)), (int(x3+x2-face_x),int(y3+y2-face_y)),(0,255,0),2)
 
     return img
-
-def get_pose_params_from_mat(mat_path):
-    # This functions gets the pose parameters from the .mat
-    # Annotations that come with the 300W_LP dataset.
-    mat = sio.loadmat(mat_path)
-    # [pitch yaw roll tdx tdy tdz scale_factor]
-    pre_pose_params = mat['Pose_Para'][0]
-    # Get [pitch, yaw, roll, tdx, tdy]
-    pose_params = pre_pose_params[:5]
-    return pose_params
-
-def get_ypr_from_mat(mat_path):
-    # Get yaw, pitch, roll from .mat annotation.
-    # They are in radians
-    mat = sio.loadmat(mat_path)
-    # [pitch yaw roll tdx tdy tdz scale_factor]
-    pre_pose_params = mat['Pose_Para'][0]
-    # Get [pitch, yaw, roll]
-    pose_params = pre_pose_params[:3]
-    return pose_params
-
-def mse_loss(input, target):
-    return torch.sum(torch.abs(input.data - target.data) ** 2)

--
Gitblit v1.8.0