elif args.dataset == 'BIWI':
    pose_dataset = datasets.BIWI(args.data_dir, args.filename_list, transformations)
elif args.dataset == 'AFLW':
    pose_dataset = datasets.AFLW(args.data_dir, args.filename_list, transformations)
elif args.dataset == 'AFLW_aug':
    pose_dataset = datasets.AFLW_aug(args.data_dir, args.filename_list, transformations)
elif args.dataset == 'AFW':
    pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
else:
    print 'Error: not a valid dataset name'
softmax = nn.Softmax()
criterion = nn.CrossEntropyLoss().cuda()
reg_criterion = nn.MSELoss().cuda()
smooth_l1_loss = nn.SmoothL1Loss().cuda()
# Regression loss coefficient
alpha = args.alpha
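
# Note: idx_tensor, used in the loops below, is defined earlier in the full script
# (outside this excerpt). Consistent with the `* 3 - 99` mapping applied to the
# softmax expectation, it is the vector of bin indices 0..65 (66 bins of 3 degrees),
# along the lines of:
#   idx_tensor = Variable(torch.FloatTensor(range(66))).cuda(gpu)
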
# Per-group learning rates: parameters from get_ignored_params are kept at lr 0,
# the rest of the backbone uses the base lr, and the fully connected heads use 5x.
optimizer = torch.optim.Adam([{'params': get_ignored_params(model), 'lr': 0},
                              {'params': get_non_ignored_params(model), 'lr': args.lr},
                              {'params': get_fc_params(model), 'lr': args.lr * 5}],
                             lr = args.lr)

print 'Ready to train network.'

print 'First phase of training.'
for epoch in range(num_epochs):
    for i, (images, labels, cont_labels, name) in enumerate(train_loader):
        images = Variable(images.cuda(gpu))
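        # Binned pose labels (yaw/pitch/roll bin indices) for the cross-entropy terms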
        label_yaw = Variable(labels[:,0].cuda(gpu))
        label_pitch = Variable(labels[:,1].cuda(gpu))
        label_roll = Variable(labels[:,2].cuda(gpu))

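        # Continuous pose labels in degrees for the regression terms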
        label_angles = Variable(cont_labels[:,:3].cuda(gpu))
        label_yaw_cont = Variable(cont_labels[:,0].cuda(gpu))
        label_pitch_cont = Variable(cont_labels[:,1].cuda(gpu))
        label_roll_cont = Variable(cont_labels[:,2].cuda(gpu))

        optimizer.zero_grad()
        model.zero_grad()
        # Forward pass
        pre_yaw, pre_pitch, pre_roll, angles = model(images)

        # Cross entropy loss on the binned labels
        loss_yaw = criterion(pre_yaw, label_yaw)
        loss_pitch = criterion(pre_pitch, label_pitch)
        loss_roll = criterion(pre_roll, label_roll)

        yaw_predicted = softmax(pre_yaw)
        pitch_predicted = softmax(pre_pitch)
        roll_predicted = softmax(pre_roll)

        yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99
        pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99
        roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99
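        # The sums above are the expected bin index under the softmax distribution;
        # scaling by the 3-degree bin width and shifting by -99 maps it to a
        # continuous angle in roughly [-99, 96] degrees.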

        # MSE regression loss on the continuous angles
        loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)
        loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)
        loss_reg_roll = reg_criterion(roll_predicted, label_roll_cont)

        # Total loss: cross entropy plus alpha-weighted regression, per angle
        loss_yaw += alpha * loss_reg_yaw
        loss_pitch += alpha * loss_reg_pitch
        loss_roll += alpha * loss_reg_roll
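        # Each angle is thus supervised with both a classification loss over the
        # angle bins and a regression loss on the continuous value; alpha balances
        # the two terms.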

print 'Second phase of training (finetuning layer).'
for epoch in range(num_epochs_ft):
    for i, (images, labels, cont_labels, name) in enumerate(train_loader):
        images = Variable(images.cuda(gpu))
        label_yaw = Variable(labels[:,0].cuda(gpu))
        label_pitch = Variable(labels[:,1].cuda(gpu))
        label_roll = Variable(labels[:,2].cuda(gpu))

        label_angles = Variable(cont_labels[:,:3].cuda(gpu))
        label_yaw_cont = Variable(cont_labels[:,0].cuda(gpu))
        label_pitch_cont = Variable(cont_labels[:,1].cuda(gpu))
        label_roll_cont = Variable(cont_labels[:,2].cuda(gpu))

        optimizer.zero_grad()
        model.zero_grad()
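        # The per-batch computation below mirrors the first phase; the difference is
        # the extra finetuning loss on the refinement outputs at the end.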
        pre_yaw, pre_pitch, pre_roll, angles = model(images)

        # Cross entropy loss on the binned labels
        loss_yaw = criterion(pre_yaw, label_yaw)
        loss_pitch = criterion(pre_pitch, label_pitch)
        loss_roll = criterion(pre_roll, label_roll)

        yaw_predicted = softmax(pre_yaw)
        pitch_predicted = softmax(pre_pitch)
        roll_predicted = softmax(pre_roll)

        # Expected bin index mapped back to degrees, as in the first phase
        yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99
        pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99
        roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99

        # MSE regression loss on the continuous angles
        loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)
        loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)
        loss_reg_roll = reg_criterion(roll_predicted, label_roll_cont)

        # Total loss
        loss_yaw += alpha * loss_reg_yaw
        loss_pitch += alpha * loss_reg_pitch
        loss_roll += alpha * loss_reg_roll

        # Down-weight the yaw term during finetuning
        loss_yaw *= 0.35

        # Finetuning loss: each refinement output in `angles` is trained against a
        # residual target built from the continuous labels and the earlier stages'
        # outputs.
        loss_seq = [loss_yaw, loss_pitch, loss_roll]
        for idx in xrange(1, len(angles)):
            label_angles_residuals = label_angles - angles[0] * 3 - 99
            for idy in xrange(1, idx):
                label_angles_residuals += angles[idy] * 3 - 99
            label_angles_residuals = label_angles_residuals.detach()
            # Reconvert to bin units (inverse of the * 3 - 99 mapping)
            label_angles_residuals = label_angles_residuals / 3.0 + 33
            loss_angles = smooth_l1_loss(angles[idx], label_angles_residuals)
            loss_seq.append(loss_angles)

        grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
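        # Assumed continuation (cut off in this excerpt): the collected losses are
        # typically backpropagated jointly with grad_seq as the gradient arguments,
        # followed by an optimizer step, e.g.
        #   torch.autograd.backward(loss_seq, grad_seq)
        #   optimizer.step()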