softmax = nn.Softmax()
criterion = nn.CrossEntropyLoss().cuda()
reg_criterion = nn.MSELoss().cuda()
smooth_l1_loss = nn.SmoothL1Loss().cuda()
# Regression loss coefficient
alpha = args.alpha
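# (alpha presumably weights the regression term against the classification
# cross-entropy when the losses are combined; only the criteria are set up here)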
print 'Second phase of training (finetuning layer).'
for epoch in range(num_epochs_ft):
    for i, (images, labels, cont_labels, name) in enumerate(train_loader):
        images = Variable(images.cuda(gpu))
        # Binned targets for yaw, pitch and roll
        label_yaw = Variable(labels[:,0].cuda(gpu))
        label_pitch = Variable(labels[:,1].cuda(gpu))
        label_roll = Variable(labels[:,2].cuda(gpu))
        # Continuous angle targets
        label_angles = Variable(cont_labels[:,:3].cuda(gpu))

        optimizer.zero_grad()
        model.zero_grad()

        # Finetuning loss: each later output regresses the residual error of the
        # first (binned) prediction
        loss_seq = []
        for idx in xrange(1, len(angles)):
            # angles[0] is in bin units; * 3 - 99 maps it to degrees
            label_angles_residuals = label_angles - (angles[0] * 3 - 99)
            # for idy in xrange(1, idx):
            #     label_angles_residuals += angles[idy] * 3 - 99
            # Targets must not backpropagate into the earlier prediction
            label_angles_residuals = label_angles_residuals.detach()
            # Reconvert degrees back to bin units (inverse of * 3 - 99)
            label_angles_residuals = label_angles_residuals / 3.0 + 33
            loss_angles = smooth_l1_loss(angles[idx], label_angles_residuals)
            loss_seq.append(loss_angles)

        # Unit gradient weight for each stage loss
        grad_seq = [torch.ones(1).cuda(gpu) for _ in range(len(loss_seq))]
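
        # A sketch of the likely follow-up in this old-PyTorch idiom (assumed,
        # not shown in this section): drive all stage losses backward at once,
        # then step the optimizer.
        torch.autograd.backward(loss_seq, grad_seq)
        optimizer.step()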