From f111cb002b9c6065fdf6bb274ce5857a9e875e8c Mon Sep 17 00:00:00 2001
From: chenshijun <csj_sky@126.com>
Date: Wed, 05 Jun 2019 15:38:49 +0800
Subject: [PATCH] face rectangle

---
 code/train_alexnet.py |   38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/code/train_alexnet.py b/code/train_alexnet.py
index 9254ee7..68ed30d 100644
--- a/code/train_alexnet.py
+++ b/code/train_alexnet.py
@@ -94,7 +94,7 @@
     model = hopenet.AlexNet(66)
     load_filtered_state_dict(model, model_zoo.load_url(model_urls['alexnet']))
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(240),
     transforms.RandomCrop(224), transforms.ToTensor(),
@@ -115,7 +115,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=batch_size,
@@ -129,12 +129,16 @@
     # Regression loss coefficient
     alpha = args.alpha
 
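+    # Bin indices 0..65, used later to convert the per-bin predictions into continuous angles.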
+    idx_tensor = [idx for idx in range(66)]
+    idx_tensor = Variable(torch.FloatTensor(idx_tensor)).cuda(gpu)
+
     optimizer = torch.optim.Adam([{'params': get_ignored_params(model), 'lr': 0},
                                   {'params': get_non_ignored_params(model), 'lr': args.lr},
                                   {'params': get_fc_params(model), 'lr': args.lr * 5}],
                                    lr = args.lr)
 
-    print 'Ready to train network.'
+    print('Ready to train network.')
     for epoch in range(num_epochs):
         for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images).cuda(gpu)
@@ -150,17 +154,24 @@
             label_roll_cont = Variable(cont_labels[:,2]).cuda(gpu)
 
             # Forward pass
-            yaw, pitch, roll, angles = model(images)
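+            # The forward pass returns raw scores over the 66 angle bins for yaw, pitch and roll.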
+            pre_yaw, pre_pitch, pre_roll = model(images)
 
             # Cross entropy loss
-            loss_yaw = criterion(yaw, label_yaw)
-            loss_pitch = criterion(pitch, label_pitch)
-            loss_roll = criterion(roll, label_roll)
+            loss_yaw = criterion(pre_yaw, label_yaw)
+            loss_pitch = criterion(pre_pitch, label_pitch)
+            loss_roll = criterion(pre_roll, label_roll)
 
             # MSE loss
-            yaw_predicted = angles[:,0]
-            pitch_predicted = angles[:,1]
-            roll_predicted = angles[:,2]
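+            # Continuous angle = expected bin index under the softmax, mapped to degrees:
+            # 66 bins of 3 degrees each, shifted by -99 so the range is roughly [-99, 99].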
+            yaw_predicted = softmax(pre_yaw)
+            pitch_predicted = softmax(pre_pitch)
+            roll_predicted = softmax(pre_roll)
+
+            yaw_predicted = torch.sum(yaw_predicted * idx_tensor, 1) * 3 - 99
+            pitch_predicted = torch.sum(pitch_predicted * idx_tensor, 1) * 3 - 99
+            roll_predicted = torch.sum(roll_predicted * idx_tensor, 1) * 3 - 99
 
             loss_reg_yaw = reg_criterion(yaw_predicted, label_yaw_cont)
             loss_reg_pitch = reg_criterion(pitch_predicted, label_pitch_cont)
@@ -172,17 +183,18 @@
             loss_roll += alpha * loss_reg_roll
 
             loss_seq = [loss_yaw, loss_pitch, loss_roll]
-            grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
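+            # Backpropagate the three losses together; each gets a unit gradient weight.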
+            grad_seq = [torch.ones(1).cuda(gpu) for _ in range(len(loss_seq))]
             optimizer.zero_grad()
             torch.autograd.backward(loss_seq, grad_seq)
             optimizer.step()
 
             if (i+1) % 100 == 0:
-                print ('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
+                print('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
                        %(epoch+1, num_epochs, i+1, len(pose_dataset)//batch_size, loss_yaw.data[0], loss_pitch.data[0], loss_roll.data[0]))
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs:
-            print 'Taking snapshot...'
+            print('Taking snapshot...')
             torch.save(model.state_dict(),
             'output/snapshots/' + args.output_string + '_epoch_'+ str(epoch+1) + '.pkl')

--
Gitblit v1.8.0