From b215992b7a187782cf494ab9f291195ffde9278e Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Fri, 11 Aug 2017 05:23:31 +0800
Subject: [PATCH] One shape param experiment

---
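Notes (kept below the --- line, so not applied by `git am`): a minimal standalone
sketch of the label layout this patch introduces. The yaw/pitch/roll values and
the single-element shape array are stand-ins; in the dataset the shape parameters
are loaded from the *_shape.npy file in __getitem__.

import numpy as np
import torch

yaw, pitch, roll = 10.0, -5.0, 2.5           # degrees, as computed in __getitem__
bins = np.array(range(-99, 102, 3))          # 3-degree bins spanning -99..+99
binned_pose = np.digitize([yaw, pitch, roll], bins) - 1

shape = np.array([12])                       # stand-in for the one shape param
labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis=0))

# labels[:3] hold the yaw/pitch/roll bin indices; labels[3:] hold the shape
# parameter(s) appended by this patch.
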
 code/datasets.py |   22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/code/datasets.py b/code/datasets.py
index 06cd433..29800fe 100644
--- a/code/datasets.py
+++ b/code/datasets.py
@@ -7,6 +7,10 @@
 
 import utils
 
+def stack_grayscale_tensor(tensor):
+    tensor = torch.cat([tensor, tensor, tensor], 0)
+    return tensor
+
 class Pose_300W_LP(Dataset):
     def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
         self.data_dir = data_dir
@@ -66,7 +70,7 @@
         return self.length
 
 class Pose_300W_LP_binned(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
         self.data_dir = data_dir
         self.transform = transform
         self.img_ext = img_ext
@@ -76,12 +80,14 @@
 
         self.X_train = filename_list
         self.y_train = filename_list
+        self.image_mode = image_mode
         self.length = len(filename_list)
 
     def __getitem__(self, index):
         img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
+        img = img.convert(self.image_mode)
         mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
+        shape_path = os.path.join(self.data_dir, self.y_train[index] + '_shape.npy')
 
         # Crop the face
         pt2d = utils.get_pt2d_from_mat(mat_path)
@@ -105,7 +111,12 @@
         roll = pose[2] * 180 / np.pi
         # Bin values
         bins = np.array(range(-99, 102, 3))
-        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
+        binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
+
+        # Get shape
+        shape = np.load(shape_path)
+
+        labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis=0))
 
         if self.transform is not None:
             img = self.transform(img)
@@ -117,7 +128,7 @@
         return self.length
 
 class AFLW2000_binned(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
         self.data_dir = data_dir
         self.transform = transform
         self.img_ext = img_ext
@@ -127,11 +138,12 @@
 
         self.X_train = filename_list
         self.y_train = filename_list
+        self.image_mode = image_mode
         self.length = len(filename_list)
 
     def __getitem__(self, index):
         img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
+        img = img.convert(self.image_mode)
         mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
 
         # Crop the face

--
Gitblit v1.8.0