From f111cb002b9c6065fdf6bb274ce5857a9e875e8c Mon Sep 17 00:00:00 2001
From: chenshijun <csj_sky@126.com>
Date: Wed, 05 Jun 2019 15:38:49 +0800
Subject: [PATCH] Resize frames, use Python 3 prints, drop face rectangle

Resize each input frame to 960x540 before running dlib face detection,
convert the Python 2 print statements to print() calls, replace xrange()
with range(), cast the face crop coordinates to int, and comment out the
expanded bounding-box rectangle drawing.
---
 code/test_on_video_dlib.py |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/code/test_on_video_dlib.py b/code/test_on_video_dlib.py
index 7f651f7..8ae74fa 100644
--- a/code/test_on_video_dlib.py
+++ b/code/test_on_video_dlib.py
@@ -58,12 +58,12 @@
     # Dlib face detection model
     cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -71,13 +71,13 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
     total = 0
 
-    idx_tensor = [idx for idx in xrange(66)]
+    idx_tensor = [idx for idx in range(66)]
     idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
 
     video = cv2.VideoCapture(video_path)
@@ -103,18 +103,22 @@
     frame_num = 1
 
     while frame_num <= args.n_frames:
-        print frame_num
+        print(frame_num)
 
         ret,frame = video.read()
+
         if ret == False:
             break
-
+
+        # Downscale the frame before running face detection
+        frame = cv2.resize(frame, (960, 540), interpolation=cv2.INTER_LINEAR)
         cv2_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
 
         # Dlib detect
         dets = cnn_face_detector(cv2_frame, 1)
 
         for idx, det in enumerate(dets):
+            print('Face detected')
             # Get x_min, y_min, x_max, y_max, conf
             x_min = det.rect.left()
             y_min = det.rect.top()
@@ -132,7 +136,7 @@
                 x_min = max(x_min, 0); y_min = max(y_min, 0)
                 x_max = min(frame.shape[1], x_max); y_max = min(frame.shape[0], y_max)
                 # Crop image
-                img = cv2_frame[y_min:y_max,x_min:x_max]
+                img = cv2_frame[int(y_min):int(y_max),int(x_min):int(x_max)]
                 img = Image.fromarray(img)
 
                 # Transform
@@ -156,7 +160,7 @@
                 # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
                 utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
                 # Plot expanded bounding box
-                cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)
+                # cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)
 
         out.write(frame)
         frame_num += 1

--
Gitblit v1.8.0