From e65c915e5bdbcca56b37aa13bcff4911beffbe37 Mon Sep 17 00:00:00 2001
From: hyhmrright <hyhmrright@163.com>
Date: Fri, 31 May 2019 13:13:35 +0800
Subject: [PATCH] change py2 to py3

---
 code/test_on_video_dlib.py |   35 +++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)

diff --git a/code/test_on_video_dlib.py b/code/test_on_video_dlib.py
index d03415f..eefced7 100644
--- a/code/test_on_video_dlib.py
+++ b/code/test_on_video_dlib.py
@@ -58,12 +58,12 @@
     # Dlib face detection model
     cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print ('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -71,13 +71,13 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
     total = 0
 
-    idx_tensor = [idx for idx in xrange(66)]
+    idx_tensor = [idx for idx in range(66)]
     idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)
 
     video = cv2.VideoCapture(video_path)
@@ -98,9 +98,13 @@
     # fourcc = cv2.cv.CV_FOURCC(*'MJPG')
     # out = cv2.VideoWriter('output/video/output-%s.avi' % args.output_string, fourcc, 30.0, (width, height))
 
+    txt_out = open('output/video/output-%s.txt' % args.output_string, 'w')
+
     frame_num = 1
 
     while frame_num <= args.n_frames:
+        print(frame_num)
+
         ret,frame = video.read()
         if ret == False:
             break
@@ -112,24 +116,23 @@
 
         for idx, det in enumerate(dets):
             # Get x_min, y_min, x_max, y_max, conf
-            x_min = d.rect.left()
-            y_min = d.rect.top()
-            x_max = d.rect.right()
-            y_max = d.rect.bottom()
-            conf = d.confidence
-            print x_min, y_min, x_max, y_max, conf
+            x_min = det.rect.left()
+            y_min = det.rect.top()
+            x_max = det.rect.right()
+            y_max = det.rect.bottom()
+            conf = det.confidence
 
-            if conf > 0.95:
+            if conf > 1.0:
                 bbox_width = abs(x_max - x_min)
                 bbox_height = abs(y_max - y_min)
-                x_min -= 3 * bbox_width / 4
-                x_max += 3 * bbox_width / 4
+                x_min -= 2 * bbox_width / 4
+                x_max += 2 * bbox_width / 4
                 y_min -= 3 * bbox_height / 4
                 y_max += bbox_height / 4
                 x_min = max(x_min, 0); y_min = max(y_min, 0)
                 x_max = min(frame.shape[1], x_max); y_max = min(frame.shape[0], y_max)
                 # Crop image
-                img = cv2_frame[y_min:y_max,x_min:x_max]
+                img = cv2_frame[int(y_min):int(y_max),int(x_min):int(x_max)]
                 img = Image.fromarray(img)
 
                 # Transform
@@ -155,8 +158,8 @@
                 # Plot expanded bounding box
                 # cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)
 
-            out.write(frame)
-            frame_num += 1
+        out.write(frame)
+        frame_num += 1
 
     out.release()
     video.release()

--
Gitblit v1.8.0