From 54818d253649ff588ed0054d10dabb2a3a170309 Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Thu, 10 Aug 2017 04:08:12 +0800
Subject: [PATCH] Doing pretty well now with ResNet-50 and Adam with a low learning rate. Also fixed the test script to use large batches.

---
 code/train_resnet_bins.py |   47 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 3 deletions(-)

diff --git a/code/train_resnet_bins.py b/code/train_resnet_bins.py
index dab3800..f98bbc3 100644
--- a/code/train_resnet_bins.py
+++ b/code/train_resnet_bins.py
@@ -6,6 +6,7 @@
 from torchvision import transforms
 import torchvision
 import torch.backends.cudnn as cudnn
+import torch.nn.functional as F
 
 import cv2
 import matplotlib.pyplot as plt
@@ -113,11 +114,19 @@
 
     model.cuda(gpu)
     criterion = nn.CrossEntropyLoss()
+    reg_criterion = nn.MSELoss()
+    # Regression loss coefficient
+    alpha = 0.01
+    lsm = nn.Softmax()
+
+    idx_tensor = [idx for idx in xrange(66)]
+    idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
+
     optimizer = torch.optim.Adam([{'params': get_ignored_params(model), 'lr': args.lr},
                                   {'params': get_non_ignored_params(model), 'lr': args.lr * 10}],
                                   lr = args.lr)
     # optimizer = torch.optim.SGD([{'params': get_ignored_params(model), 'lr': args.lr},
-    #                               {'params': get_non_ignored_params(model), 'lr': args.lr}],
+    #                              {'params': get_non_ignored_params(model), 'lr': args.lr}],
     #                               lr = args.lr, momentum=0.9)
     # optimizer = torch.optim.RMSprop([{'params': get_ignored_params(model), 'lr': args.lr},
     #                               {'params': get_non_ignored_params(model), 'lr': args.lr * 10}],
@@ -134,24 +143,56 @@
 
             optimizer.zero_grad()
             yaw, pitch, roll = model(images)
+
             loss_yaw = criterion(yaw, label_yaw)
             loss_pitch = criterion(pitch, label_pitch)
             loss_roll = criterion(roll, label_roll)
 
+            # loss_seq = [loss_yaw, loss_pitch, loss_roll]
+            # grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
+            # torch.autograd.backward(loss_seq, grad_seq)
+            # optimizer.step()
+
+            # MSE loss
+            yaw_predicted = F.softmax(yaw)
+            pitch_predicted = F.softmax(pitch)
+            roll_predicted = F.softmax(roll)
+
+            yaw_predicted = torch.sum(yaw_predicted.data * idx_tensor, 1)
+            pitch_predicted = torch.sum(pitch_predicted.data * idx_tensor, 1)
+            roll_predicted = torch.sum(roll_predicted.data * idx_tensor, 1)
+
+            loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw.float())
+            loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
+            loss_reg_roll = reg_criterion(roll_predicted, label_roll.float())
+
+            # print yaw_predicted[0], label_yaw.data[0]
+
+            loss_yaw += alpha * loss_reg_yaw
+            loss_pitch += alpha * loss_reg_pitch
+            loss_roll += alpha * loss_reg_roll
+
             loss_seq = [loss_yaw, loss_pitch, loss_roll]
             grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
+            model.zero_grad()
             torch.autograd.backward(loss_seq, grad_seq)
             optimizer.step()
+
+            # print ('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
+            #        %(epoch+1, num_epochs, i+1, len(pose_dataset)//batch_size, loss_yaw.data[0], loss_pitch.data[0], loss_roll.data[0]))
 
             if (i+1) % 100 == 0:
                 print ('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
                        %(epoch+1, num_epochs, i+1, len(pose_dataset)//batch_size, loss_yaw.data[0], loss_pitch.data[0], loss_roll.data[0]))
+                # if epoch == 0:
+                #     torch.save(model.state_dict(),
+                #     'output/snapshots/resnet18_sgd_iter_'+ str(i+1) + '.pkl')
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs - 1:
             print 'Taking snapshot...'
             torch.save(model.state_dict(),
-            'output/snapshots/resnet18_cr_epoch_'+ str(epoch+1) + '.pkl')
+            'output/snapshots/resnet18_sgd_epoch_'+ str(epoch+1) + '.pkl')
 
     # Save the final Trained Model
-    torch.save(model.state_dict(), 'output/snapshots/resnet18_cr_epoch_' + str(epoch+1) + '.pkl')
+    torch.save(model.state_dict(), 'output/snapshots/resnet18_sgd_epoch_' + str(epoch+1) + '.pkl')

--
Gitblit v1.8.0
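
Note on the new loss term: the second hunk augments the per-angle cross-entropy over the 66 yaw/pitch/roll bins with an MSE term on the expected bin index (the softmax expectation over the bin indices), weighted by alpha = 0.01. Below is a minimal, self-contained sketch of that combined loss for a single angle, written against current PyTorch rather than the 2017-era API used in the patch; the helper name binned_angle_loss, the dummy batch, and the single loss.backward() call (in place of torch.autograd.backward over the three losses) are illustrative assumptions, not code from the repository.

    # Sketch of the combined loss added in this patch: cross-entropy over
    # 66 angle bins plus alpha * MSE on the expected bin index.
    # Names and the dummy data below are illustrative only.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    num_bins = 66
    alpha = 0.01  # regression loss coefficient, as in the patch

    cls_criterion = nn.CrossEntropyLoss()
    reg_criterion = nn.MSELoss()

    # Fixed vector of bin indices [0, 1, ..., 65] used to take the expectation.
    idx_tensor = torch.arange(num_bins, dtype=torch.float32)

    def binned_angle_loss(logits, bin_labels):
        """Cross-entropy on the bin logits plus alpha * MSE on the expected bin index."""
        ce = cls_criterion(logits, bin_labels)
        # Softmax over bins, then expected bin index per sample: sum_i p_i * i.
        probs = F.softmax(logits, dim=1)
        expected_bin = torch.sum(probs * idx_tensor, dim=1)
        mse = reg_criterion(expected_bin, bin_labels.float())
        return ce + alpha * mse

    # Dummy batch: 8 samples, one set of bin logits per angle (yaw shown here).
    yaw_logits = torch.randn(8, num_bins, requires_grad=True)
    yaw_bins = torch.randint(0, num_bins, (8,))
    loss_yaw = binned_angle_loss(yaw_logits, yaw_bins)
    loss_yaw.backward()  # the patch backpropagates yaw, pitch and roll losses together

In the patch the same computation is applied independently to yaw, pitch, and roll, and the regression target is the (float-cast) bin label rather than a continuous angle in degrees.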