natanielruiz
2017-10-26 f415df3448622f30c3d1eb680596871672b38dac
code/datasets.py
@@ -3,9 +3,10 @@
import cv2
from torch.utils.data.dataset import Dataset
import os
from PIL import Image, ImageFilter
import utils
from torchvision import transforms
def stack_grayscale_tensor(tensor):
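    # Replicate the single channel three times along dim 0 so a grayscale
    # tensor can be fed to a network that expects 3-channel input.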
    tensor = torch.cat([tensor, tensor, tensor], 0)
@@ -38,7 +39,9 @@
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        # k was fixed at 0.35 before; now it is sampled uniformly from [0.2, 0.4)
        k = np.random.random_sample() * 0.2 + 0.2
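        # Loosely crop the face: the landmark bounding box is widened by 0.6 * k on the
        # left, right and bottom, with a larger 2 * k margin above the landmarks.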
        x_min -= 0.6 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 0.6 * k * abs(x_max - x_min)
@@ -59,15 +62,10 @@
            roll = -roll
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Rotate?
        # rnd = np.random.random_sample()
        # if rnd < 0.5:
        #     if roll >= 0:
        #         img = img.rotate(30)
        #         roll -= 30
        #     else:
        #         img = img.rotate(-30)
        #         roll += 30
        # Blur?
        rnd = np.random.random_sample()
        if rnd < 0.05:
            img = img.filter(ImageFilter.BLUR)
        # Bin values
        bins = np.array(range(-99, 102, 3))
@@ -77,11 +75,172 @@
        shape = np.load(shape_path)
        labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis = 0))
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 122,450
        return self.length
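# Example usage (an illustrative sketch, not part of this commit): each dataset now
# returns (img, binned labels, continuous labels, filename). The paths, batch size
# and transform values below are placeholders, not the ones used by the training code.
#
# transformations = transforms.Compose([transforms.Scale(240),
#                                        transforms.RandomCrop(224),
#                                        transforms.ToTensor()])
# pose_dataset = Pose_300W_LP('data/300W_LP', 'data/300W_LP/filename_list.txt',
#                             transformations)
# train_loader = torch.utils.data.DataLoader(dataset=pose_dataset, batch_size=16,
#                                            shuffle=True, num_workers=2)
# for images, labels, cont_labels, names in train_loader:
#     yaw_bins, pitch_bins, roll_bins = labels[:, 0], labels[:, 1], labels[:, 2]
#     yaw_deg, pitch_deg, roll_deg = cont_labels[:, 0], cont_labels[:, 1], cont_labels[:, 2]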
class Pose_300W_LP_random_ds(Dataset):
    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
        self.data_dir = data_dir
        self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        filename_list = get_list_from_filenames(filename_path)
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
    def __getitem__(self, index):
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
        shape_path = os.path.join(self.data_dir, self.y_train[index] + '_shape.npy')
        # Crop the face
        pt2d = utils.get_pt2d_from_mat(mat_path)
        x_min = min(pt2d[0,:])
        y_min = min(pt2d[1,:])
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        # k = 0.2 to 0.40
        k = np.random.random_sample() * 0.2 + 0.2
        x_min -= 0.6 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 0.6 * k * abs(x_max - x_min)
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # We get the pose in radians
        pose = utils.get_ypr_from_mat(mat_path)
        # And convert to degrees.
        pitch = pose[0] * 180 / np.pi
        yaw = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        ds = 1 + np.random.randint(0,4) * 5
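        # ds is 1, 6, 11 or 16: the crop is shrunk by this factor with nearest-neighbour
        # interpolation and blown back up, so the network also sees pixelated,
        # low-resolution versions of the faces.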
        original_size = img.size
        img = img.resize((img.size[0] // ds, img.size[1] // ds), resample=Image.NEAREST)
        img = img.resize((original_size[0], original_size[1]), resample=Image.NEAREST)
        # Flip?
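        # Mirroring the image horizontally negates yaw and roll; pitch is unchanged.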
        rnd = np.random.random_sample()
        if rnd < 0.5:
            yaw = -yaw
            roll = -roll
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Blur?
        rnd = np.random.random_sample()
        if rnd < 0.05:
            img = img.filter(ImageFilter.BLUR)
        # Bin values
        bins = np.array(range(-99, 102, 3))
        binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
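        # Worked example: bins holds the 67 edges -99, -96, ..., 99, i.e. 3-degree
        # intervals covering [-99, 99). np.digitize returns 1-based interval indices,
        # so subtracting 1 gives class labels 0..65, e.g.
        #   np.digitize([-30.0], bins) - 1  ->  array([23])   # interval [-30, -27)
        #   np.digitize([  0.0], bins) - 1  ->  array([33])   # interval [0, 3)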
        # Get shape
        shape = np.load(shape_path)
        labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis = 0))
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 122,450
        return self.length
class Pose_300W_LP_SR(Dataset):
    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
        self.data_dir = data_dir
        self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        filename_list = get_list_from_filenames(filename_path)
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
    def __getitem__(self, index):
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
        # Crop the face
        pt2d = utils.get_pt2d_from_mat(mat_path)
        x_min = min(pt2d[0,:])
        y_min = min(pt2d[1,:])
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        # k = 0.2 to 0.40
        k = np.random.random_sample() * 0.2 + 0.2
        x_min -= 0.6 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 0.6 * k * abs(x_max - x_min)
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # We get the pose in radians
        pose = utils.get_ypr_from_mat(mat_path)
        # And convert to degrees.
        pitch = pose[0] * 180 / np.pi
        yaw = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        rnd = np.random.random_sample()
        if rnd < 0.5:
            ds = 10
            original_size = img.size
            img = img.resize((img.size[0] // ds, img.size[1] // ds), resample=Image.NEAREST)
            img = img.resize((original_size[0], original_size[1]), resample=Image.NEAREST)
        # Flip?
        rnd = np.random.random_sample()
        if rnd < 0.5:
            yaw = -yaw
            roll = -roll
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Blur?
        rnd = np.random.random_sample()
        if rnd < 0.05:
            img = img.filter(ImageFilter.BLUR)
        # Bin values
        bins = np.array(range(-99, 102, 3))
        binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
        # The shape targets are loaded the same way as in the other 300W-LP datasets above
        shape = np.load(os.path.join(self.data_dir, self.y_train[index] + '_shape.npy'))
        labels = torch.LongTensor(np.concatenate((binned_pose, shape), axis = 0))
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        # Transforms
        img = transforms.Scale(240)(img)
        img = transforms.RandomCrop(224)(img)
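        # A YCbCr copy of the same crop is kept alongside the RGB tensor, presumably so a
        # super-resolution branch (hence the _SR suffix) can operate on the luminance channel.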
        img_ycc = img.convert('YCbCr')
        img = transforms.ToTensor()(img)
        img_ycc = transforms.ToTensor()(img_ycc)
        return img, img_ycc, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 122,450
@@ -108,24 +267,18 @@
        # Crop the face
        pt2d = utils.get_pt2d_from_mat(mat_path)
        x_min = min(pt2d[0,:])
        y_min = min(pt2d[1,:])
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        k = 0.20
        x_min -= 2 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 2 * k * abs(x_max - x_min)
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # k = 0.15
        # x_min -= k * abs(x_max - x_min)
        # y_min -= 4 * k * abs(y_max - y_min)
        # x_max += k * abs(x_max - x_min)
        # y_max += 0.4 * k * abs(y_max - y_min)
        # img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # We get the pose in radians
        pose = utils.get_ypr_from_mat(mat_path)
@@ -136,14 +289,141 @@
        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 2,000
        return self.length
class AFLW2000_ds(Dataset):
    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
        self.data_dir = data_dir
        self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        filename_list = get_list_from_filenames(filename_path)
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
    def __getitem__(self, index):
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
        # Crop the face
        pt2d = utils.get_pt2d_from_mat(mat_path)
        x_min = min(pt2d[0,:])
        y_min = min(pt2d[1,:])
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        k = 0.20
        x_min -= 2 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 2 * k * abs(x_max - x_min)
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        ds = 3
        original_size = img.size
        img = img.resize((img.size[0] // ds, img.size[1] // ds), resample=Image.NEAREST)
        img = img.resize((original_size[0], original_size[1]), resample=Image.NEAREST)
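        # Every test image is degraded with a fixed 3x nearest-neighbour down/up-sample,
        # presumably to evaluate the model on low-resolution inputs.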
        # We get the pose in radians
        pose = utils.get_ypr_from_mat(mat_path)
        # And convert to degrees.
        pitch = pose[0] * 180 / np.pi
        yaw = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 2,000
        return self.length
class AFLW_aug(Dataset):
    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.txt', image_mode='RGB'):
        self.data_dir = data_dir
        self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        filename_list = get_list_from_filenames(filename_path)
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
    def __getitem__(self, index):
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
        # We get the pose in radians
        annot = open(txt_path, 'r')
        line = annot.readline().split(' ')
        pose = [float(line[1]), float(line[2]), float(line[3])]
        # And convert to degrees.
        yaw = pose[0] * 180 / np.pi
        pitch = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        # The roll in the AFLW annotations comes with the opposite sign, so negate it here
        roll *= -1
        # Augment
        # Flip?
        rnd = np.random.random_sample()
        if rnd < 0.5:
            yaw = -yaw
            roll = -roll
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Blur?
        # rnd = np.random.random_sample()
        # if rnd < 0.05:
        #     img = img.filter(ImageFilter.BLUR)
        #     if rnd < 0.025:
        #         img = img.filter(ImageFilter.BLUR)
        #
        # rnd = np.random.random_sample()
        # if rnd < 0.05:
        #     nb = np.random.randint(1,5)
        #     img = img.rotate(-nb)
        # elif rnd > 0.95:
        #     nb = np.random.randint(1,5)
        #     img = img.rotate(nb)
        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # train: 18,863
        # test: 1,966
        return self.length
class AFLW(Dataset):
@@ -178,11 +458,12 @@
        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # train: 18,863
@@ -217,22 +498,27 @@
        yaw, pitch, roll = [float(line[1]), float(line[2]), float(line[3])]
        # Crop the face
        k = 0.32
        x1 = float(line[4])
        y1 = float(line[5])
        x2 = float(line[6])
        y2 = float(line[7])
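        # Fields 4-7 of the annotation line are read as the face box corners (x1, y1, x2, y2);
        # the box is widened by 0.8 * k on each side and extended by 2 * k above and 1 * k below.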
        x1 -= 0.8 * k * abs(x2 - x1)
        y1 -= 2 * k * abs(y2 - y1)
        x2 += 0.8 * k * abs(x2 - x1)
        y2 += 1 * k * abs(y2 - y1)
        img = img.crop((int(x1), int(y1), int(x2), int(y2)))
        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # Around 200
@@ -287,9 +573,11 @@
        R = R[:3,:]
        pose_annot.close()
        R = np.transpose(R)
        roll = -np.arctan2(R[1][0], R[0][0]) * 180 / np.pi
        yaw = -np.arctan2(-R[2][0], np.sqrt(R[2][1] ** 2 + R[2][2] ** 2)) * 180 / np.pi
        pitch = np.arctan2(R[2][1], R[2][2]) * 180 / np.pi
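        # The annotation's rotation matrix is transposed and decomposed into Euler angles in
        # degrees; the sign choices presumably match the yaw/pitch/roll convention of the
        # other datasets in this file.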
        # Loosely crop face
        k = 0.35
@@ -299,23 +587,17 @@
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # Flip?
        # rnd = np.random.random_sample()
        # if rnd < 0.5:
        #     yaw = -yaw
        #     roll = -roll
        #     img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Bin values
        bins = np.array(range(-99, 102, 3))
        binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
        labels = torch.LongTensor(binned_pose)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 15,667