From c495a0f6b13b794bab9f6e3423d5038ce645d816 Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Wed, 13 Sep 2017 21:12:59 +0800
Subject: [PATCH] Batch testing and hopenet training complete

---
 code/train.py | 24 +++++++++++-------------
 1 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/code/train.py b/code/train.py
index ff060af..50eeb82 100644
--- a/code/train.py
+++ b/code/train.py
@@ -46,6 +46,8 @@
     parser.add_argument('--output_string', dest='output_string', help='String appended to output snapshots.', default = '', type=str)
     parser.add_argument('--alpha', dest='alpha', help='Regression loss coefficient.',
           default=0.001, type=float)
+    parser.add_argument('--iter_ref', dest='iter_ref', help='Number of iterative refinement passes.',
+          default=1, type=int)
 
     args = parser.parse_args()
     return args
@@ -111,7 +113,7 @@
     # ResNet101 with 3 outputs
     # model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 23, 3], 66)
     # ResNet50
-    model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
+    model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66, args.iter_ref)
     # ResNet18
     # model = hopenet.Hopenet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2], 66)
     load_filtered_state_dict(model, model_zoo.load_url(model_urls['resnet50']))
@@ -177,14 +179,12 @@
             loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
             loss_reg_roll = reg_criterion(roll_predicted, label_roll.float())
 
-            # print yaw_predicted, label_yaw.float(), loss_reg_yaw
             # Total loss
             loss_yaw += alpha * loss_reg_yaw
             loss_pitch += alpha * loss_reg_pitch
             loss_roll += alpha * loss_reg_roll
 
             loss_seq = [loss_yaw, loss_pitch, loss_roll]
-            # loss_seq = [loss_reg_yaw, loss_reg_pitch, loss_reg_roll]
             grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
             torch.autograd.backward(loss_seq, grad_seq)
             optimizer.step()
@@ -226,9 +226,9 @@
             pitch_predicted = softmax(pre_pitch)
             roll_predicted = softmax(pre_roll)
 
-            yaw_predicted = torch.sum(yaw_predicted.data * idx_tensor, 1)
-            pitch_predicted = torch.sum(pitch_predicted.data * idx_tensor, 1)
-            roll_predicted = torch.sum(roll_predicted.data * idx_tensor, 1)
+            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1)
+            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1)
+            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1)
 
             loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw.float())
             loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
@@ -240,9 +240,11 @@
             loss_roll += alpha * loss_reg_roll
 
             # Finetuning loss
-            loss_angles = reg_criterion(angles[0], label_angles.float())
+            loss_seq = [loss_yaw, loss_pitch, loss_roll]
+            for idx in xrange(args.iter_ref):
+                loss_angles = reg_criterion(angles[idx], label_angles.float())
+                loss_seq.append(loss_angles)
 
-            loss_seq = [loss_yaw, loss_pitch, loss_roll, loss_angles]
             grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
             torch.autograd.backward(loss_seq, grad_seq)
             optimizer.step()
@@ -255,11 +257,7 @@
             #            'output/snapshots/' + args.output_string + '_iter_'+ str(i+1) + '.pkl')
 
         # Save models at numbered epochs.
-        if epoch % 1 == 0 and epoch < num_epochs_ft - 1:
+        if epoch % 1 == 0 and epoch < num_epochs_ft:
             print 'Taking snapshot...'
             torch.save(model.state_dict(), 'output/snapshots/' + args.output_string + '_epoch_'+ str(num_epochs+epoch+1) + '.pkl')
 
-
-
-    # Save the final Trained Model
-    torch.save(model.state_dict(), 'output/snapshots/' + args.output_string + '_epoch_' + str(num_epochs+epoch+1) + '.pkl')
--
Gitblit v1.8.0
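
Note for reviewers: the substantive change in the fine-tuning loop is the loss construction. Instead of a single refinement term on angles[0], the loop now appends one regression term per --iter_ref pass and backpropagates the whole list jointly. The snippet below is a minimal illustrative sketch of that aggregation, not code from the repository; it assumes a recent PyTorch API (sum(...).backward() in place of the patch's torch.autograd.backward(loss_seq, grad_seq) with unit gradients) and uses synthetic tensors in place of the network outputs and labels.

import torch
import torch.nn as nn

reg_criterion = nn.MSELoss()
iter_ref = 2      # value that would come from --iter_ref (assumed for illustration)
batch = 8         # synthetic batch size

# Stand-ins for the per-angle expectations and the iterative-refinement outputs.
yaw_predicted = torch.randn(batch, requires_grad=True)
pitch_predicted = torch.randn(batch, requires_grad=True)
roll_predicted = torch.randn(batch, requires_grad=True)
angles = [torch.randn(batch, 3, requires_grad=True) for _ in range(iter_ref)]

# Synthetic ground-truth angles.
label_yaw, label_pitch, label_roll = (torch.randn(batch) for _ in range(3))
label_angles = torch.randn(batch, 3)

# One loss per Euler angle, as in train.py (the alpha-weighted classification
# terms are already folded into these in the real script).
loss_seq = [
    reg_criterion(yaw_predicted, label_yaw),
    reg_criterion(pitch_predicted, label_pitch),
    reg_criterion(roll_predicted, label_roll),
]

# New in this commit: one extra regression term per iterative refinement pass.
for idx in range(iter_ref):
    loss_seq.append(reg_criterion(angles[idx], label_angles))

# Joint backward pass over all terms, equivalent to backpropagating each loss
# in loss_seq with a unit gradient.
sum(loss_seq).backward()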