From 704f0404795d8d5ef982549c1aa145434f0f0097 Mon Sep 17 00:00:00 2001
From: hyhmrright <hyhmrright@163.com>
Date: Fri, 31 May 2019 13:14:18 +0800
Subject: [PATCH] Port test_on_video_dlib.py to Python 3

---
 code/test_on_video_dlib.py |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

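The hunks below port code/test_on_video_dlib.py to Python 3: print statements become print() calls, xrange becomes range, and the dlib bounding-box coordinates are cast to int before being used as NumPy slice indices (recent NumPy versions raise a TypeError on float indices). A minimal sketch of that crop step, with a dummy frame and hypothetical box coordinates standing in for the detector output:

    import numpy as np

    # Hypothetical stand-ins for the values in the diff: an RGB frame and a
    # detection box whose coordinates may be floats after scaling/padding.
    cv2_frame = np.zeros((480, 640, 3), dtype=np.uint8)
    x_min, y_min, x_max, y_max = 10.0, 20.0, 200.5, 220.5

    # Cast to int before slicing, as the hunk around the crop does;
    # cv2_frame[y_min:y_max, x_min:x_max] would fail with float bounds.
    img = cv2_frame[int(y_min):int(y_max), int(x_min):int(x_max)]
    print(img.shape)  # (200, 190, 3)
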
diff --git a/code/test_on_video_dlib.py b/code/test_on_video_dlib.py
index 7f651f7..eefced7 100644
--- a/code/test_on_video_dlib.py
+++ b/code/test_on_video_dlib.py
@@ -58,12 +58,12 @@
     # Dlib face detection model
     cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -71,13 +71,13 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
     total = 0
 
-    idx_tensor = [idx for idx in xrange(66)]
+    idx_tensor = [idx for idx in range(66)]
     idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
 
     video = cv2.VideoCapture(video_path)
@@ -103,7 +103,7 @@
     frame_num = 1
 
     while frame_num <= args.n_frames:
-        print frame_num
+        print(frame_num)
 
         ret,frame = video.read()
         if ret == False:
@@ -132,7 +132,7 @@
                 x_min = max(x_min, 0); y_min = max(y_min, 0)
                 x_max = min(frame.shape[1], x_max); y_max = min(frame.shape[0], y_max)
                 # Crop image
-                img = cv2_frame[y_min:y_max,x_min:x_max]
+                img = cv2_frame[int(y_min):int(y_max),int(x_min):int(x_max)]
                 img = Image.fromarray(img)
 
                 # Transform
@@ -156,7 +156,7 @@
                 # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
                 utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
                 # Plot expanded bounding box
-                cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)
+                # cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)
 
         out.write(frame)
         frame_num += 1

--
Gitblit v1.8.0