From 43416c4717d2430c3e11f042294d12b781fee2e1 Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Wed, 27 Sep 2017 04:09:30 +0800
Subject: [PATCH] Failed lstm experiment

---
 code/train.py | 52 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 18 deletions(-)

diff --git a/code/train.py b/code/train.py
index a41edc0..3525f87 100644
--- a/code/train.py
+++ b/code/train.py
@@ -133,6 +133,8 @@
         pose_dataset = datasets.BIWI(args.data_dir, args.filename_list, transformations)
     elif args.dataset == 'AFLW':
         pose_dataset = datasets.AFLW(args.data_dir, args.filename_list, transformations)
+    elif args.dataset == 'AFLW_aug':
+        pose_dataset = datasets.AFLW_aug(args.data_dir, args.filename_list, transformations)
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
@@ -147,6 +149,7 @@
     softmax = nn.Softmax()
     criterion = nn.CrossEntropyLoss().cuda()
     reg_criterion = nn.MSELoss().cuda()
+    smooth_l1_loss = nn.SmoothL1Loss().cuda()
     # Regression loss coefficient
     alpha = args.alpha
@@ -155,18 +158,23 @@
     optimizer = torch.optim.Adam([{'params': get_ignored_params(model), 'lr': 0},
                                   {'params': get_non_ignored_params(model), 'lr': args.lr},
-                                  {'params': get_fc_params(model), 'lr': args.lr * 2}],
+                                  {'params': get_fc_params(model), 'lr': args.lr * 5}],
                                    lr = args.lr)

     print 'Ready to train network.'
     print 'First phase of training.'
     for epoch in range(num_epochs):
-        for i, (images, labels, name) in enumerate(train_loader):
+        for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images.cuda(gpu))
             label_yaw = Variable(labels[:,0].cuda(gpu))
             label_pitch = Variable(labels[:,1].cuda(gpu))
             label_roll = Variable(labels[:,2].cuda(gpu))
+
+            label_angles = Variable(cont_labels[:,:3].cuda(gpu))
+            label_yaw_cont = Variable(cont_labels[:,0].cuda(gpu))
+            label_pitch_cont = Variable(cont_labels[:,1].cuda(gpu))
+            label_roll_cont = Variable(cont_labels[:,2].cuda(gpu))

             optimizer.zero_grad()
             model.zero_grad()
@@ -183,13 +191,13 @@
             pitch_predicted = softmax(pre_pitch)
             roll_predicted = softmax(pre_roll)

-            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1)
-            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1)
-            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1)
+            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99
+            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99
+            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99

-            loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw.float())
-            loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
-            loss_reg_roll = reg_criterion(roll_predicted, label_roll.float())
+            loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)
+            loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)
+            loss_reg_roll = reg_criterion(roll_predicted, label_roll_cont)

             # Total loss
             loss_yaw += alpha * loss_reg_yaw
@@ -216,12 +224,16 @@
     print 'Second phase of training (finetuning layer).'
     for epoch in range(num_epochs_ft):
-        for i, (images, labels, name) in enumerate(train_loader):
+        for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images.cuda(gpu))
             label_yaw = Variable(labels[:,0].cuda(gpu))
             label_pitch = Variable(labels[:,1].cuda(gpu))
             label_roll = Variable(labels[:,2].cuda(gpu))
-            label_angles = Variable(labels[:,:3].cuda(gpu))
+
+            label_angles = Variable(cont_labels[:,:3].cuda(gpu))
+            label_yaw_cont = Variable(cont_labels[:,0].cuda(gpu))
+            label_pitch_cont = Variable(cont_labels[:,1].cuda(gpu))
+            label_roll_cont = Variable(cont_labels[:,2].cuda(gpu))

             optimizer.zero_grad()
             model.zero_grad()
@@ -238,13 +250,13 @@
             pitch_predicted = softmax(pre_pitch)
             roll_predicted = softmax(pre_roll)

-            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1)
-            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1)
-            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1)
+            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99
+            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99
+            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99

-            loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw.float())
-            loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
-            loss_reg_roll = reg_criterion(roll_predicted, label_roll.float())
+            loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)
+            loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)
+            loss_reg_roll = reg_criterion(roll_predicted, label_roll_cont)

             # Total loss
             loss_yaw += alpha * loss_reg_yaw
@@ -254,9 +266,13 @@
             # Finetuning loss
             loss_seq = [loss_yaw, loss_pitch, loss_roll]
             for idx in xrange(1,len(angles)):
-                label_angles_residuals = label_angles.float() - angles[0]
+                label_angles_residuals = label_angles - angles[0] * 3 - 99
+                for idy in xrange(1,idx):
+                    label_angles_residuals += angles[idy] * 3 - 99
                 label_angles_residuals = label_angles_residuals.detach()
-                loss_angles = reg_criterion(angles[idx], label_angles_residuals)
+                # Reconvert to other unit
+                label_angles_residuals = label_angles_residuals / 3.0 + 33
+                loss_angles = smooth_l1_loss(angles[idx], label_angles_residuals)
                 loss_seq.append(loss_angles)

             grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
--
Gitblit v1.8.0
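
Note (not part of the patch): the recurring expression torch.sum(softmax_output * idx_tensor, 1) * 3 - 99 takes the expected bin index of a classification head and maps it to a continuous angle in degrees, so the regression term (MSE in the main loops, Smooth L1 in the finetuning loss) can be compared against the continuous labels in cont_labels. Below is a minimal sketch of that conversion only, assuming 66 three-degree bins (the bin count does not appear in these hunks) and written in the same Python 2 / Variable-era PyTorch idiom as train.py; the tensor names are hypothetical.

    import torch
    import torch.nn as nn
    from torch.autograd import Variable

    softmax = nn.Softmax()
    # Hypothetical (batch, 66) logits from one angle head (yaw, pitch or roll)
    logits = Variable(torch.randn(8, 66))
    # Bin indices 0..65; each bin is 3 degrees wide, spanning roughly -99 to +99 degrees
    idx_tensor = Variable(torch.FloatTensor([idx for idx in xrange(66)]))
    probs = softmax(logits)
    # Expected bin index under the softmax distribution, then mapped from bin units to degrees
    angle_in_degrees = torch.sum(probs * idx_tensor, 1) * 3 - 99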