From 92ed4cb2ea68be44b1ff153e00410c2082ee62df Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Tue, 15 Aug 2017 00:28:28 +0800
Subject: [PATCH] New experiments with hourglass

---
 code/datasets.py | 222 +++++++++++++++++++++++++++++++++---------------------
 1 files changed, 135 insertions(+), 87 deletions(-)

diff --git a/code/datasets.py b/code/datasets.py
index 06cd433..63ee287 100644
--- a/code/datasets.py
+++ b/code/datasets.py
@@ -7,8 +7,12 @@
 
 import utils
 
+def stack_grayscale_tensor(tensor):
+    tensor = torch.cat([tensor, tensor, tensor], 0)
+    return tensor
+
 class Pose_300W_LP(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
         self.data_dir = data_dir
         self.transform = transform
         self.img_ext = img_ext
@@ -18,26 +22,55 @@
 
         self.X_train = filename_list
         self.y_train = filename_list
+        self.image_mode = image_mode
         self.length = len(filename_list)
 
     def __getitem__(self, index):
         img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
+        img = img.convert(self.image_mode)
+        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
+        shape_path = os.path.join(self.data_dir, self.y_train[index] + '_shape.npy')
 
-        pose = utils.get_ypr_from_mat(os.path.join(self.data_dir, self.y_train[index] + self.annot_ext))
-        label = torch.FloatTensor(pose)
+        # Crop the face
+        pt2d = utils.get_pt2d_from_mat(mat_path)
+        x_min = min(pt2d[0,:])
+        y_min = min(pt2d[1,:])
+        x_max = max(pt2d[0,:])
+        y_max = max(pt2d[1,:])
+
+        k = 0.15
+        x_min -= k * abs(x_max - x_min)
+        y_min -= 4 * k * abs(y_max - y_min)
+        x_max += k * abs(x_max - x_min)
+        y_max += 0.4 * k * abs(y_max - y_min)
+        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
+
+        # We get the pose in radians
+        pose = utils.get_ypr_from_mat(mat_path)
+        # And convert to degrees.
+        pitch = pose[0] * 180 / np.pi
+        yaw = pose[1] * 180 / np.pi
+        roll = pose[2] * 180 / np.pi
+        # Bin values
+        bins = np.array(range(-99, 102, 3))
+        binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
+
+        # Get shape
+        shape = np.load(shape_path)
+
+        labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis = 0))
 
         if self.transform is not None:
             img = self.transform(img)
 
-        return img, label, self.X_train[index]
+        return img, labels, self.X_train[index]
 
     def __len__(self):
         # 122,450
         return self.length
 
 class AFLW2000(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
         self.data_dir = data_dir
         self.transform = transform
         self.img_ext = img_ext
@@ -47,91 +80,12 @@
 
         self.X_train = filename_list
         self.y_train = filename_list
+        self.image_mode = image_mode
         self.length = len(filename_list)
 
     def __getitem__(self, index):
         img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
-
-        pose = utils.get_ypr_from_mat(os.path.join(self.data_dir, self.y_train[index] + self.annot_ext))
-        label = torch.FloatTensor(pose)
-
-        if self.transform is not None:
-            img = self.transform(img)
-
-        return img, label, self.X_train[index]
-
-    def __len__(self):
-        # 2,000
-        return self.length
-
-class Pose_300W_LP_binned(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
-        self.data_dir = data_dir
-        self.transform = transform
-        self.img_ext = img_ext
-        self.annot_ext = annot_ext
-
-        filename_list = get_list_from_filenames(filename_path)
-
-        self.X_train = filename_list
-        self.y_train = filename_list
-        self.length = len(filename_list)
-
-    def __getitem__(self, index):
-        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
-        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
-
-        # Crop the face
-        pt2d = utils.get_pt2d_from_mat(mat_path)
-        x_min = min(pt2d[0,:])
-        y_min = min(pt2d[1,:])
-        x_max = max(pt2d[0,:])
-        y_max = max(pt2d[1,:])
-
-        k = 0.15
-        x_min -= k * abs(x_max - x_min)
-        y_min -= 4 * k * abs(y_max - y_min)
-        x_max += k * abs(x_max - x_min)
-        y_max += 0.4 * k * abs(y_max - y_min)
-        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
-
-        # We get the pose in radians
-        pose = utils.get_ypr_from_mat(mat_path)
-        # And convert to degrees.
-        pitch = pose[0] * 180 / np.pi
-        yaw = pose[1] * 180 / np.pi
-        roll = pose[2] * 180 / np.pi
-        # Bin values
-        bins = np.array(range(-99, 102, 3))
-        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
-
-        if self.transform is not None:
-            img = self.transform(img)
-
-        return img, labels, self.X_train[index]
-
-    def __len__(self):
-        # 122,450
-        return self.length
-
-class AFLW2000_binned(Dataset):
-    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat'):
-        self.data_dir = data_dir
-        self.transform = transform
-        self.img_ext = img_ext
-        self.annot_ext = annot_ext
-
-        filename_list = get_list_from_filenames(filename_path)
-
-        self.X_train = filename_list
-        self.y_train = filename_list
-        self.length = len(filename_list)
-
-    def __getitem__(self, index):
-        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
-        img = img.convert('RGB')
+        img = img.convert(self.image_mode)
         mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
 
         # Crop the face
@@ -167,6 +121,100 @@
         # 2,000
         return self.length
 
+class AFLW(Dataset):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
+        self.data_dir = data_dir
+        self.transform = transform
+        self.img_ext = img_ext
+        self.annot_ext = annot_ext
+
+        filename_list = get_list_from_filenames(filename_path)
+
+        self.X_train = filename_list
+        self.y_train = filename_list
+        self.image_mode = image_mode
+        self.length = len(filename_list)
+
+    def __getitem__(self, index):
+        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
+        img = img.convert(self.image_mode)
+        txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
+
+        # We get the pose in radians
+        annot = open(txt_path, 'r')
+        line = annot.readline().split(' ')
+        pose = [float(line[1]), float(line[2]), float(line[3])]
+        # And convert to degrees.
+        yaw = pose[0] * 180 / np.pi
+        pitch = pose[1] * 180 / np.pi
+        roll = pose[2] * 180 / np.pi
+        # Something weird with the roll in AFLW
+        if yaw < 0:
+            roll *= -1
+        # Bin values
+        bins = np.array(range(-99, 102, 3))
+        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
+
+        if self.transform is not None:
+            img = self.transform(img)
+
+        return img, labels, self.X_train[index]
+
+    def __len__(self):
+        # train: 18,863
+        # test: 1,966
+        return self.length
+
+class AFW(Dataset):
+    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
+        self.data_dir = data_dir
+        self.transform = transform
+        self.img_ext = img_ext
+        self.annot_ext = annot_ext
+
+        filename_list = get_list_from_filenames(filename_path)
+
+        self.X_train = filename_list
+        self.y_train = filename_list
+        self.image_mode = image_mode
+        self.length = len(filename_list)
+
+    def __getitem__(self, index):
+        txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
+        img_name = self.X_train[index].split('_')[0]
+
+        img = Image.open(os.path.join(self.data_dir, img_name + self.img_ext))
+        img = img.convert(self.image_mode)
+        txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
+
+        # We get the pose in degrees
+        annot = open(txt_path, 'r')
+        line = annot.readline().split(' ')
+        yaw, pitch, roll = [float(line[1]), float(line[2]), float(line[3])]
+
+        # Crop the face
+        margin = 40
+        x_min = float(line[4]) - margin
+        y_min = float(line[5]) - margin
+        x_max = float(line[6]) + margin
+        y_max = float(line[7]) + margin
+
+        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
+
+        # Bin values
+        bins = np.array(range(-99, 102, 3))
+        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
+
+        if self.transform is not None:
+            img = self.transform(img)
+
+        return img, labels, self.X_train[index]
+
+    def __len__(self):
+        # Around 200
+        return self.length
+
+
 def get_list_from_filenames(file_path):
     # input: relative path to .txt file with file names
     # output: list of relative path names
--
Gitblit v1.8.0
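
Note: the sketch below shows how the 3-degree binning used throughout this patch maps angles to class indices, and how the new AFLW dataset might be consumed. The bin edges and the (img, labels, name) return layout are taken from the code above; the directory layout, file list and torchvision transform are illustrative assumptions, not part of the commit.

    import numpy as np
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader

    import datasets  # code/datasets.py as modified by this patch

    # Same edges as in the patch: -99, -96, ..., 99 in 3-degree steps, giving 66
    # in-range classes. np.digitize returns the index of the upper edge, so the
    # trailing "- 1" maps e.g. -50 degrees to class 16 (between edges -51 and -48).
    bins = np.array(range(-99, 102, 3))
    print(np.digitize([-50.0, 0.0, 30.5], bins) - 1)  # -> [16 33 43]

    # Hypothetical paths and transform -- adjust to the actual AFLW layout.
    transformations = transforms.Compose([
        transforms.Resize(240),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
    ])
    pose_dataset = datasets.AFLW(
        data_dir='data/AFLW',                 # assumed image directory
        filename_path='data/AFLW/train.txt',  # assumed list of sample names
        transform=transformations)

    train_loader = DataLoader(pose_dataset, batch_size=16, shuffle=True, num_workers=2)
    for images, labels, names in train_loader:
        # labels is a LongTensor of binned [yaw, pitch, roll] class indices
        break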