From 6dd2ff502947ec809d420e2baefa023d821a8bb1 Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Thu, 07 Sep 2017 07:26:35 +0800
Subject: [PATCH] Normalize inputs, tune the regression loss weight, and rename the training script

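Rename code/train_resnet_bins_comb.py to code/train.py and tidy the training loop: switch the dataset class to datasets.Pose_300W_LP, scale inputs to 250 px before the 224 random crop and normalize them with the ImageNet mean/std, move the cross-entropy and MSE criteria onto the GPU, drop the unused nn.Softmax module, lower the regression loss coefficient alpha from 0.1 to 0.01, zero gradients on both the optimizer and the model before the forward pass, disable the per-iteration snapshots of the first epoch, and save snapshots under the resnet50_norm_norot_* names.

Illustrative sketches of the preprocessing and loss pieces touched here are included as notes after the diffstat; they are not applied by this patch.
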
---
 code/train.py |   56 +++++++++++++++++++++++++++-----------------------------
 1 file changed, 27 insertions(+), 29 deletions(-)
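
Note: a minimal sketch of the preprocessing pipeline this patch switches to, runnable on its own. The dummy PIL image below is a placeholder for the 300W-LP face crops the real script loads, transforms.Scale is the torchvision name of this era (later renamed Resize), and the mean/std values are the usual ImageNet statistics expected by the pretrained ResNet-50.

import torchvision.transforms as transforms
from PIL import Image

# Resize the shorter side to 250 px, take a random 224x224 crop, convert to a
# CHW float tensor in [0, 1], then normalize with the ImageNet channel statistics.
transformations = transforms.Compose([transforms.Scale(250),
                                      transforms.RandomCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                           std=[0.229, 0.224, 0.225])])

# Quick shape check on a placeholder image (the real script feeds dataset images
# through the same Compose via datasets.Pose_300W_LP).
img = Image.new('RGB', (450, 450))
x = transformations(img)
print x.size()   # torch.Size([3, 224, 224])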

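Note: the alpha = 0.01 set below weights an MSE term computed on the softmax expectation over the 66 pose bins (idx_tensor). The lines that take that expectation fall between the hunks, so the following single-angle sketch is written under that assumption rather than copied from the script; the .cuda() calls are dropped so it runs on CPU, and the logits and labels are random placeholders.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

criterion = nn.CrossEntropyLoss()          # classification over the 66 bins
reg_criterion = nn.MSELoss()               # regression on the expected bin index
alpha = 0.01                               # regression loss coefficient from this patch
idx_tensor = Variable(torch.FloatTensor(range(66)))   # bin indices 0..65

# Placeholder batch standing in for one head of the network: 8 samples,
# 66 bin logits each, plus the corresponding binned ground-truth labels.
yaw = Variable(torch.randn(8, 66), requires_grad=True)
label_yaw = Variable((torch.rand(8) * 66).long())

# Cross-entropy term on the binned label.
loss_yaw = criterion(yaw, label_yaw)

# Assumed step (elided between the hunks): expected bin index = sum_k softmax(yaw)_k * k.
yaw_predicted = torch.sum(F.softmax(yaw) * idx_tensor, 1)
loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw.float())

# Total loss for this angle, as in the hunk below.
loss_yaw = loss_yaw + alpha * loss_reg_yaw

# The script backpropagates the three per-angle totals jointly with unit gradients;
# with only yaw in this sketch the list has a single entry.
loss_seq = [loss_yaw]
grad_seq = [torch.Tensor(1).fill_(1) for _ in range(len(loss_seq))]
torch.autograd.backward(loss_seq, grad_seq)
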
diff --git a/code/train_resnet_bins_comb.py b/code/train.py
similarity index 80%
rename from code/train_resnet_bins_comb.py
rename to code/train.py
index eb23590..d80ed30 100644
--- a/code/train_resnet_bins_comb.py
+++ b/code/train.py
@@ -102,10 +102,15 @@
 
     print 'Loading data.'
 
-    transformations = transforms.Compose([transforms.Scale(224),transforms.RandomCrop(224),
-                                          transforms.ToTensor()])
+    # transformations = transforms.Compose([transforms.Scale(224),
+    #                                       transforms.RandomCrop(224),
+    #                                       transforms.ToTensor()])
 
-    pose_dataset = datasets.Pose_300W_LP_binned(args.data_dir, args.filename_list,
+    transformations = transforms.Compose([transforms.Scale(250),
+                                          transforms.RandomCrop(224), transforms.ToTensor(),
+                                          transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+    pose_dataset = datasets.Pose_300W_LP(args.data_dir, args.filename_list,
                                 transformations)
     train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=batch_size,
@@ -113,11 +118,10 @@
                                                num_workers=2)
 
     model.cuda(gpu)
-    criterion = nn.CrossEntropyLoss()
-    reg_criterion = nn.MSELoss()
+    criterion = nn.CrossEntropyLoss().cuda()
+    reg_criterion = nn.MSELoss().cuda()
     # Regression loss coefficient
-    alpha = 0.1
-    lsm = nn.Softmax()
+    alpha = 0.01
 
     idx_tensor = [idx for idx in xrange(66)]
     idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
@@ -126,32 +130,28 @@
                                   {'params': get_non_ignored_params(model), 'lr': args.lr * 10}],
                                   lr = args.lr)
     # optimizer = torch.optim.SGD([{'params': get_ignored_params(model), 'lr': args.lr},
-    #                              {'params': get_non_ignored_params(model), 'lr': args.lr}],
-    #                               lr = args.lr, momentum=0.9, weight_decay=5e-4)
-    # optimizer = torch.optim.RMSprop([{'params': get_ignored_params(model), 'lr': args.lr},
-    #                               {'params': get_non_ignored_params(model), 'lr': args.lr * 10}],
-    #                               lr = args.lr)
+    #                               {'params': get_non_ignored_params(model), 'lr': args.lr}],
+    #                               lr = args.lr,
+    #                               momentum = 0.9, weight_decay=0.01)
 
     print 'Ready to train network.'
 
     for epoch in range(num_epochs):
         for i, (images, labels, name) in enumerate(train_loader):
-            images = Variable(images).cuda(gpu)
-            label_yaw = Variable(labels[:,0]).cuda(gpu)
-            label_pitch = Variable(labels[:,1]).cuda(gpu)
-            label_roll = Variable(labels[:,2]).cuda(gpu)
+            images = Variable(images.cuda(gpu))
+            label_yaw = Variable(labels[:,0].cuda(gpu))
+            label_pitch = Variable(labels[:,1].cuda(gpu))
+            label_roll = Variable(labels[:,2].cuda(gpu))
 
             optimizer.zero_grad()
+            model.zero_grad()
+
             yaw, pitch, roll = model(images)
 
+            # Cross entropy loss
             loss_yaw = criterion(yaw, label_yaw)
             loss_pitch = criterion(pitch, label_pitch)
             loss_roll = criterion(roll, label_roll)
-
-            # loss_seq = [loss_yaw, loss_pitch, loss_roll]
-            # grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
-            # torch.autograd.backward(loss_seq, grad_seq)
-            # optimizer.step()
 
             # MSE loss
             yaw_predicted = F.softmax(yaw)
@@ -166,15 +166,13 @@
             loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch.float())
             loss_reg_roll = reg_criterion(roll_predicted, label_roll.float())
 
-            # print yaw_predicted[0], label_yaw.data[0]
-
+            # Total loss
             loss_yaw += alpha * loss_reg_yaw
             loss_pitch += alpha * loss_reg_pitch
             loss_roll += alpha * loss_reg_roll
 
             loss_seq = [loss_yaw, loss_pitch, loss_roll]
             grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
-            model.zero_grad()
             torch.autograd.backward(loss_seq, grad_seq)
             optimizer.step()
 
@@ -184,15 +182,15 @@
             if (i+1) % 100 == 0:
                 print ('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
                        %(epoch+1, num_epochs, i+1, len(pose_dataset)//batch_size, loss_yaw.data[0], loss_pitch.data[0], loss_roll.data[0]))
-                if epoch == 0:
-                    torch.save(model.state_dict(),
-                    'output/snapshots/resnet50_iter_'+ str(i+1) + '.pkl')
+                # if epoch == 0:
+                #     torch.save(model.state_dict(),
+                #     'output/snapshots/resnet50_lbatch_iter_'+ str(i+1) + '.pkl')
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs - 1:
             print 'Taking snapshot...'
             torch.save(model.state_dict(),
-            'output/snapshots/resnet50_epoch_'+ str(epoch+1) + '.pkl')
+            'output/snapshots/resnet50_norm_norot_epoch_'+ str(epoch+1) + '.pkl')
 
     # Save the final Trained Model
-    torch.save(model.state_dict(), 'output/snapshots/resnet50_epoch_' + str(epoch+1) + '.pkl')
+    torch.save(model.state_dict(), 'output/snapshots/resnet50_norm_norot_epoch_' + str(epoch+1) + '.pkl')

--
Gitblit v1.8.0