From e65c915e5bdbcca56b37aa13bcff4911beffbe37 Mon Sep 17 00:00:00 2001
From: hyhmrright <hyhmrright@163.com>
Date: Fri, 31 May 2019 13:13:35 +0800
Subject: [PATCH] change py2 to py3

---
 code/test_on_video.py               |   12 +++---
 code/test_on_video_dlib.py          |   12 +++---
 code/train_alexnet.py               |   10 ++--
 code/train_resnet50_regression.py   |   10 ++--
 models/mmod_human_face_detector.dat |    0 
 snapshots/hopenet_alpha2.pkl        |    0 
 code/test_alexnet.py                |    8 ++--
 code/test_resnet50_regression.py    |    8 ++--
 code/train_hopenet.py               |    8 ++--
 snapshots/hopenet_robust_alpha1.pkl |    0 
 code/test_on_video_dockerface.py    |   12 +++---
 code/test_hopenet.py                |    8 ++--
 demo.txt                            |    1 
 snapshots/hopenet_alpha1.pkl        |    0 
 14 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/code/test_alexnet.py b/code/test_alexnet.py
index 81a9148..45ad25f 100644
--- a/code/test_alexnet.py
+++ b/code/test_alexnet.py
@@ -45,12 +45,12 @@
 
     model = hopenet.AlexNet(66)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -73,7 +73,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=args.batch_size,
@@ -81,7 +81,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
diff --git a/code/test_hopenet.py b/code/test_hopenet.py
index d4a9f5f..928c84c 100644
--- a/code/test_hopenet.py
+++ b/code/test_hopenet.py
@@ -46,12 +46,12 @@
     # ResNet50 structure
     model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -74,7 +74,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=args.batch_size,
@@ -82,7 +82,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
diff --git a/code/test_on_video.py b/code/test_on_video.py
index 7c51348..0193035 100644
--- a/code/test_on_video.py
+++ b/code/test_on_video.py
@@ -51,12 +51,12 @@
     # ResNet50 structure
     model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -64,7 +64,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
@@ -105,7 +105,7 @@
         line = line.split(' ')
         det_frame_num = int(line[0])
 
-        print frame_num
+        print(frame_num)
 
         # Stop at a certain frame number
         if frame_num > args.n_frames:
@@ -165,7 +165,7 @@
             pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99
             roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99
 
-            # Print new frame with cube and axis
+            # Print new frame with cube and axis
             txt_out.write(str(frame_num) + ' %f %f %f\n' % (yaw_predicted, pitch_predicted, roll_predicted))
             # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
             utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
@@ -174,7 +174,7 @@
 
             # Peek next frame detection
             next_frame_num = int(bbox_line_list[idx+1].strip('\n').split(' ')[0])
-            # print 'next_frame_num ', next_frame_num
+            # print('next_frame_num ', next_frame_num)
             if next_frame_num == det_frame_num:
                 idx += 1
                 line = bbox_line_list[idx].strip('\n').split(' ')
diff --git a/code/test_on_video_dlib.py b/code/test_on_video_dlib.py
index 375cd5e..eefced7 100644
--- a/code/test_on_video_dlib.py
+++ b/code/test_on_video_dlib.py
@@ -58,12 +58,12 @@
     # Dlib face detection model
     cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -71,13 +71,13 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
     total = 0
 
-    idx_tensor = [idx for idx in xrange(66)]
+    idx_tensor = [idx for idx in range(66)]
     idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
 
     video = cv2.VideoCapture(video_path)
@@ -103,7 +103,7 @@
     frame_num = 1
 
     while frame_num <= args.n_frames:
-        print frame_num
+        print(frame_num)
 
         ret,frame = video.read()
         if ret == False:
@@ -132,7 +132,7 @@
                 x_min = max(x_min, 0); y_min = max(y_min, 0)
                 x_max = min(frame.shape[1], x_max); y_max = min(frame.shape[0], y_max)
                 # Crop image
-                img = cv2_frame[y_min:y_max,x_min:x_max]
+                img = cv2_frame[int(y_min):int(y_max),int(x_min):int(x_max)]
                 img = Image.fromarray(img)
 
                 # Transform
diff --git a/code/test_on_video_dockerface.py b/code/test_on_video_dockerface.py
index 9f09824..30c262c 100644
--- a/code/test_on_video_dockerface.py
+++ b/code/test_on_video_dockerface.py
@@ -51,12 +51,12 @@
     # ResNet50 structure
     model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -64,7 +64,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
@@ -105,7 +105,7 @@
         line = line.split(' ')
         det_frame_num = int(line[0])
 
-        print frame_num
+        print(frame_num)
 
         # Stop at a certain frame number
         if frame_num > args.n_frames:
@@ -166,7 +166,7 @@
                 pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99
                 roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99
 
-                # Print new frame with cube and axis
+                # Print new frame with cube and axis
                 txt_out.write(str(frame_num) + ' %f %f %f\n' % (yaw_predicted, pitch_predicted, roll_predicted))
                 # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
                 utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
@@ -175,7 +175,7 @@
 
             # Peek next frame detection
             next_frame_num = int(bbox_line_list[idx+1].strip('\n').split(' ')[0])
-            # print 'next_frame_num ', next_frame_num
+            # print('next_frame_num ', next_frame_num)
             if next_frame_num == det_frame_num:
                 idx += 1
                 line = bbox_line_list[idx].strip('\n').split(' ')
diff --git a/code/test_resnet50_regression.py b/code/test_resnet50_regression.py
index 67c63af..7edfa4f 100644
--- a/code/test_resnet50_regression.py
+++ b/code/test_resnet50_regression.py
@@ -45,12 +45,12 @@
 
     model = hopenet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 3)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -73,7 +73,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=args.batch_size,
@@ -81,7 +81,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
diff --git a/code/train_alexnet.py b/code/train_alexnet.py
index 0c6c9db..68ed30d 100644
--- a/code/train_alexnet.py
+++ b/code/train_alexnet.py
@@ -94,7 +94,7 @@
     model = hopenet.AlexNet(66)
     load_filtered_state_dict(model, model_zoo.load_url(model_urls['alexnet']))
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(240),
     transforms.RandomCrop(224), transforms.ToTensor(),
@@ -115,7 +115,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=batch_size,
@@ -137,7 +137,7 @@
                                   {'params': get_fc_params(model), 'lr': args.lr * 5}],
                                    lr = args.lr)
 
-    print 'Ready to train network.'
+    print('Ready to train network.')
     for epoch in range(num_epochs):
         for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images).cuda(gpu)
@@ -184,11 +184,11 @@
             optimizer.step()
 
             if (i+1) % 100 == 0:
-                print ('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
+                print('Epoch [%d/%d], Iter [%d/%d] Losses: Yaw %.4f, Pitch %.4f, Roll %.4f'
                        %(epoch+1, num_epochs, i+1, len(pose_dataset)//batch_size, loss_yaw.data[0], loss_pitch.data[0], loss_roll.data[0]))
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs:
-            print 'Taking snapshot...'
+            print('Taking snapshot...')
             torch.save(model.state_dict(),
             'output/snapshots/' + args.output_string + '_epoch_'+ str(epoch+1) + '.pkl')
diff --git a/code/train_hopenet.py b/code/train_hopenet.py
index 00e65ea..56b9ca3 100644
--- a/code/train_hopenet.py
+++ b/code/train_hopenet.py
@@ -96,7 +96,7 @@
         saved_state_dict = torch.load(args.snapshot)
         model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(240),
     transforms.RandomCrop(224), transforms.ToTensor(),
@@ -119,7 +119,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
 
     train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
@@ -142,7 +142,7 @@
                                   {'params': get_fc_params(model), 'lr': args.lr * 5}],
                                    lr = args.lr)
 
-    print 'Ready to train network.'
+    print('Ready to train network.')
     for epoch in range(num_epochs):
         for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images).cuda(gpu)
@@ -195,6 +195,6 @@
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs:
-            print 'Taking snapshot...'
+            print('Taking snapshot...')
             torch.save(model.state_dict(),
             'output/snapshots/' + args.output_string + '_epoch_'+ str(epoch+1) + '.pkl')
diff --git a/code/train_resnet50_regression.py b/code/train_resnet50_regression.py
index 04d27c7..da4a047 100644
--- a/code/train_resnet50_regression.py
+++ b/code/train_resnet50_regression.py
@@ -87,7 +87,7 @@
     model = hopenet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 3)
     load_filtered_state_dict(model, model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth'))
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(240),
     transforms.RandomCrop(224), transforms.ToTensor(),
@@ -108,7 +108,7 @@
     elif args.dataset == 'AFW':
         pose_dataset = datasets.AFW(args.data_dir, args.filename_list, transformations)
     else:
-        print 'Error: not a valid dataset name'
+        print('Error: not a valid dataset name')
         sys.exit()
     train_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                                batch_size=batch_size,
@@ -123,8 +123,8 @@
                                   {'params': get_fc_params(model), 'lr': args.lr * 5}],
                                    lr = args.lr)
 
-    print 'Ready to train network.'
-    print 'First phase of training.'
+    print('Ready to train network.')
+    print('First phase of training.')
     for epoch in range(num_epochs):
         for i, (images, labels, cont_labels, name) in enumerate(train_loader):
             images = Variable(images).cuda(gpu)
@@ -143,6 +143,6 @@
 
         # Save models at numbered epochs.
         if epoch % 1 == 0 and epoch < num_epochs:
-            print 'Taking snapshot...'
+            print('Taking snapshot...')
             torch.save(model.state_dict(),
             'output/snapshots/' + args.output_string + '_epoch_'+ str(epoch+1) + '.pkl')
diff --git a/demo.txt b/demo.txt
new file mode 100644
index 0000000..8aa7957
--- /dev/null
+++ b/demo.txt
@@ -0,0 +1 @@
+python code/test_on_video_dlib.py  --snapshot ./snapshots/hopenet_robust_alpha1.pkl --face_model ./models/mmod_human_face_detector.dat --video ./videos/out2.mp4 --output_string "something you want to add to the output video name and result file name" --n_frames 500 --fps 30
\ No newline at end of file
diff --git a/models/mmod_human_face_detector.dat b/models/mmod_human_face_detector.dat
new file mode 100644
index 0000000..f1f73a5
--- /dev/null
+++ b/models/mmod_human_face_detector.dat
Binary files differ
diff --git a/snapshots/hopenet_alpha1.pkl b/snapshots/hopenet_alpha1.pkl
new file mode 100644
index 0000000..4013522
--- /dev/null
+++ b/snapshots/hopenet_alpha1.pkl
Binary files differ
diff --git a/snapshots/hopenet_alpha2.pkl b/snapshots/hopenet_alpha2.pkl
new file mode 100644
index 0000000..88cd9ad
--- /dev/null
+++ b/snapshots/hopenet_alpha2.pkl
Binary files differ
diff --git a/snapshots/hopenet_robust_alpha1.pkl b/snapshots/hopenet_robust_alpha1.pkl
new file mode 100644
index 0000000..541c4c2
--- /dev/null
+++ b/snapshots/hopenet_robust_alpha1.pkl
Binary files differ

--
Gitblit v1.8.0