From f111cb002b9c6065fdf6bb274ce5857a9e875e8c Mon Sep 17 00:00:00 2001
From: chenshijun <csj_sky@126.com>
Date: Wed, 05 Jun 2019 15:38:49 +0800
Subject: [PATCH] face rectangle

---
 code/test_on_video_dockerface.py | 15 ++++++++-------
 1 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/code/test_on_video_dockerface.py b/code/test_on_video_dockerface.py
index 327b99a..30c262c 100644
--- a/code/test_on_video_dockerface.py
+++ b/code/test_on_video_dockerface.py
@@ -51,12 +51,12 @@
     # ResNet50 structure
     model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
 
-    print 'Loading snapshot.'
+    print('Loading snapshot.')
     # Load snapshot
     saved_state_dict = torch.load(snapshot_path)
     model.load_state_dict(saved_state_dict)
 
-    print 'Loading data.'
+    print('Loading data.')
 
     transformations = transforms.Compose([transforms.Scale(224),
     transforms.CenterCrop(224), transforms.ToTensor(),
@@ -64,7 +64,7 @@
 
     model.cuda(gpu)
 
-    print 'Ready to test network.'
+    print('Ready to test network.')
 
     # Test the Model
     model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
@@ -105,7 +105,7 @@
         line = line.split(' ')
         det_frame_num = int(line[0])
 
-        print frame_num
+        print(frame_num)
 
         # Stop at a certain frame number
         if frame_num > args.n_frames:
@@ -126,6 +126,7 @@
         ret,frame = video.read()
         if ret == False:
             break
+        cv2_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
 
         while True:
             x_min, y_min, x_max, y_max, conf = int(float(line[1])), int(float(line[2])), int(float(line[3])), int(float(line[4])), float(line[5])
@@ -146,7 +147,7 @@
                 x_max = min(frame.shape[1], x_max)
                 y_max = min(frame.shape[0], y_max)
                 # Crop image
-                img = frame[y_min:y_max,x_min:x_max]
+                img = cv2_frame[y_min:y_max,x_min:x_max]
                 img = Image.fromarray(img)
 
                 # Transform
@@ -165,7 +166,7 @@
                 pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99
                 roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99
 
-                # Print new frame with cube and axis
+                # print(new frame with cube and axis
                 txt_out.write(str(frame_num) + ' %f %f %f\n' % (yaw_predicted, pitch_predicted, roll_predicted))
                 # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
                 utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
@@ -174,7 +175,7 @@
 
             # Peek next frame detection
             next_frame_num = int(bbox_line_list[idx+1].strip('\n').split(' ')[0])
-            # print 'next_frame_num ', next_frame_num
+            # print('next_frame_num ', next_frame_num
            if next_frame_num == det_frame_num:
                 idx += 1
                 line = bbox_line_list[idx].strip('\n').split(' ')
--
Gitblit v1.8.0
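
Note on the crop change: cv2.VideoCapture returns frames in BGR channel order, while PIL and the torchvision transforms feeding Hopenet expect RGB, so the face crop must be taken from the cvtColor'd copy; drawing still happens on the original BGR frame. Below is a minimal sketch of that ordering, not the patched script itself; the predict_pose() wrapper around the model's forward pass is a hypothetical stand-in, and the box is assumed already clamped to the frame bounds.

    import cv2
    from PIL import Image

    def process_face(frame, box, predict_pose):
        """frame: BGR image from cv2.VideoCapture; box: (x_min, y_min, x_max, y_max), clamped."""
        x_min, y_min, x_max, y_max = box

        # Convert once per frame: OpenCV decodes to BGR, but PIL / torchvision
        # transforms (and the pretrained weights) expect RGB channel order.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Crop the face from the RGB copy -- this is what the patch changes;
        # cropping from the raw BGR frame fed the network swapped channels.
        face = Image.fromarray(rgb_frame[y_min:y_max, x_min:x_max])

        # predict_pose is a hypothetical wrapper returning Euler angles in degrees.
        yaw, pitch, roll = predict_pose(face)

        # Drawing stays on the original BGR frame, since cv2 drawing expects BGR.
        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
        return frame, (yaw, pitch, roll)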