From f415df3448622f30c3d1eb680596871672b38dac Mon Sep 17 00:00:00 2001
From: natanielruiz <nataniel777@hotmail.com>
Date: Thu, 26 Oct 2017 02:51:37 +0800
Subject: [PATCH] after fg

---
 practice/.ipynb_checkpoints/smoothing_ypr-checkpoint.ipynb |   75 ++++++++++++++++++++++++-------------
 1 file changed, 48 insertions(+), 27 deletions(-)
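
For reference, a minimal standalone sketch of the bbox-reading pattern in the cell changed below: keep only the first detection per frame and coerce possibly-float coordinates to int. The helper name read_bboxes and the space-separated "<frame> <x_min> <y_min> <x_max> <y_max> ..." file layout are assumptions for illustration, not code taken verbatim from the notebook.

def read_bboxes(bbox_path):
    # Parse a space-separated detection file, one detection per line.
    boxes = []
    last_frame = None
    with open(bbox_path) as bbox_file:
        for line in bbox_file:
            parts = line.strip('\n').split(' ')
            frame = int(parts[0])
            if frame == last_frame:
                continue  # drop extra detections for a frame already seen
            last_frame = frame
            # Coordinates may be written as floats; truncate them to ints.
            x_min, y_min, x_max, y_max = [int(float(v)) for v in parts[1:5]]
            boxes.append((frame, x_min, y_min, x_max, y_max))
    return boxes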

diff --git a/practice/.ipynb_checkpoints/smoothing_ypr-checkpoint.ipynb b/practice/.ipynb_checkpoints/smoothing_ypr-checkpoint.ipynb
index a411c30..350646c 100644
--- a/practice/.ipynb_checkpoints/smoothing_ypr-checkpoint.ipynb
+++ b/practice/.ipynb_checkpoints/smoothing_ypr-checkpoint.ipynb
@@ -2,9 +2,9 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 156,
+   "execution_count": 8,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -17,22 +17,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 157,
+   "execution_count": 9,
    "metadata": {
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
-    "video_path = '../data/video/SGT036_2016_07_25_pivothead_AVI.avi'\n",
-    "bbox_path = '../data/video/annotations/SGT036_childface.txt'\n",
+    "video_path = '../data/video/jacob_collier_live.mp4'\n",
+    "bbox_path = '../data/video/annotations/video-det-fold-jacob_collier_live.txt'\n",
     "\n",
-    "annot_path = '../output/video/output-SGT036_resnet18_cr_epoch_1.txt'\n",
-    "output_string = 'SGT036_resnet18_cr_epoch_1_flat_smoothed'"
+    "annot_path = '../output/video/output-jacob_normal_a1.txt'\n",
+    "output_string = 'jacob_smooth'"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 158,
+   "execution_count": 10,
    "metadata": {
     "collapsed": false
    },
@@ -41,9 +41,9 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[-6.069214 -0.831665  0.53318  ..., -3.836042 -3.868275 -2.377155]\n",
-      "(8508,)\n",
-      "(53464,)\n"
+      "[ -2.073635 -10.835449   8.318871 ..., -20.763079 -12.68498   19.257992]\n",
+      "(19987,)\n",
+      "(21373,)\n"
      ]
     }
    ],
@@ -67,11 +67,15 @@
     "    list_p.append(float(line[2]))\n",
     "    list_r.append(float(line[3]))\n",
     "    \n",
+    "last_frame = 0\n",
     "for line in bbox:\n",
     "    line = line.strip('\\n')\n",
     "    line = line.split(' ')\n",
     "    frame = int(line[0])\n",
-    "    x_min, y_min, x_max, y_max = int(line[1]), int(line[2]), int(line[3]), int(line[4])\n",
+    "    if frame == last_frame:\n",
+    "        continue\n",
+    "    last_frame = frame\n",
+    "    x_min, y_min, x_max, y_max = int(float(line[1])), int(float(line[2])), int(float(line[3])), int(float(line[4]))\n",
     "    list_x_min.append(x_min)\n",
     "    list_x_max.append(x_max)\n",
     "    list_y_min.append(y_min)\n",
@@ -93,7 +97,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 159,
+   "execution_count": 11,
    "metadata": {
     "collapsed": false
    },
@@ -107,31 +111,39 @@
     }
    ],
    "source": [
-    "window_len = 6\n",
+    "window_len = 1\n",
     "pad = window_len / 2\n",
     "window = 'flat'\n",
+    "window_2 = 'flat'\n",
+    "window_len_2 = 7\n",
+    "pad_2 = window_len_2 / 2\n",
     "\n",
     "s = np.r_[y[window_len-1:0:-1],y,y[-2:-window_len-1:-1]]\n",
     "t = np.r_[p[window_len-1:0:-1],p,p[-2:-window_len-1:-1]]\n",
     "u = np.r_[r[window_len-1:0:-1],r,r[-2:-window_len-1:-1]]\n",
     "\n",
-    "xa = np.r_[x_min_arr[window_len-1:0:-1],x_min_arr,x_min_arr[-2:-window_len-1:-1]]\n",
-    "xb = np.r_[x_max_arr[window_len-1:0:-1],x_max_arr,x_max_arr[-2:-window_len-1:-1]]\n",
-    "ya = np.r_[y_min_arr[window_len-1:0:-1],y_min_arr,y_min_arr[-2:-window_len-1:-1]]\n",
-    "yb = np.r_[y_max_arr[window_len-1:0:-1],y_max_arr,y_max_arr[-2:-window_len-1:-1]]\n",
+    "xa = np.r_[x_min_arr[window_len_2-1:0:-1],x_min_arr,x_min_arr[-2:-window_len_2-1:-1]]\n",
+    "xb = np.r_[x_max_arr[window_len_2-1:0:-1],x_max_arr,x_max_arr[-2:-window_len_2-1:-1]]\n",
+    "ya = np.r_[y_min_arr[window_len_2-1:0:-1],y_min_arr,y_min_arr[-2:-window_len_2-1:-1]]\n",
+    "yb = np.r_[y_max_arr[window_len_2-1:0:-1],y_max_arr,y_max_arr[-2:-window_len_2-1:-1]]\n",
     "\n",
     "if window == 'flat':\n",
     "    w=np.ones(window_len, 'd')\n",
     "else:\n",
     "    w=eval('np.' + window + '(window_len)')\n",
+    "    \n",
+    "if window_2 == 'flat':\n",
+    "    w_2=np.ones(window_len_2, 'd')\n",
+    "else:\n",
+    "    w_2=eval('np.' + window_2 + '(window_len_2)')    \n",
     "\n",
     "y = np.convolve(w / w.sum(), s, mode='valid')[pad:-pad]\n",
     "p = np.convolve(w / w.sum(), t, mode='valid')[pad:-pad]\n",
     "r = np.convolve(w / w.sum(), u, mode='valid')[pad:-pad]\n",
-    "x_min_arr = np.convolve(w / w.sum(), xa, mode='valid')[pad:-pad]\n",
-    "x_max_arr = np.convolve(w / w.sum(), xb, mode='valid')[pad:-pad]\n",
-    "y_min_arr = np.convolve(w / w.sum(), ya, mode='valid')[pad:-pad]\n",
-    "y_max_arr = np.convolve(w / w.sum(), yb, mode='valid')[pad:-pad]\n",
+    "x_min_arr = np.convolve(w_2 / w_2.sum(), xa, mode='valid')[pad_2:-pad_2]\n",
+    "x_max_arr = np.convolve(w_2 / w_2.sum(), xb, mode='valid')[pad_2:-pad_2]\n",
+    "y_min_arr = np.convolve(w_2 / w_2.sum(), ya, mode='valid')[pad_2:-pad_2]\n",
+    "y_max_arr = np.convolve(w_2 / w_2.sum(), yb, mode='valid')[pad_2:-pad_2]\n",
     "\n",
     "pose_dict = {}\n",
     "bbox_dict = {}\n",
@@ -151,7 +163,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 160,
+   "execution_count": 14,
    "metadata": {
     "collapsed": false
    },
@@ -166,12 +178,21 @@
    ],
    "source": [
     "video = cv2.VideoCapture(video_path)\n",
+    "# New cv2\n",
     "width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))   # float\n",
     "height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float\n",
     "\n",
     "# Define the codec and create VideoWriter object\n",
     "fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n",
     "out = cv2.VideoWriter('../output/video/output-%s.avi' % output_string, fourcc, 30.0, (width, height))\n",
+    "\n",
+    "# Old cv2\n",
+    "# width = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))   # float\n",
+    "# height = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) # float\n",
+    "\n",
+    "# # Define the codec and create VideoWriter object\n",
+    "# fourcc = cv2.cv.CV_FOURCC(*'MJPG')\n",
+    "# out = cv2.VideoWriter('../output/video/output-%s.avi' % output_string, fourcc, 30.0, (width, height))\n",
     "\n",
     "txt_out = open('../output/video/output-%s.txt' % output_string, 'w')\n",
     "\n",
@@ -196,7 +217,7 @@
     "\n",
     "        # Print new frame with cube and TODO: axis\n",
     "        txt_out.write(str(frame_num) + ' %f %f %f\\n' % (yaw_predicted, pitch_predicted, roll_predicted))\n",
-    "        utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = 200)\n",
+    "        utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = 80)\n",
     "        out.write(frame)\n",
     "    else:\n",
     "        out.write(frame)\n",
@@ -226,9 +247,9 @@
  "metadata": {
   "anaconda-cloud": {},
   "kernelspec": {
-   "display_name": "Python [conda root]",
+   "display_name": "Python [default]",
    "language": "python",
-   "name": "conda-root-py"
+   "name": "python2"
   },
   "language_info": {
    "codemirror_mode": {

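The smoothing cell pads each series by reflection and convolves it with a normalized flat (moving-average) window, using one window length for the pose angles and another for the bounding-box coordinates. Below is a minimal standalone sketch of that pattern, with a guard for a window of length 1, where pad would be 0 and a [0:-0] slice would otherwise return an empty array. The function name smooth_flat is an assumption for illustration.

import numpy as np

def smooth_flat(arr, window_len):
    # Moving-average ('flat' window) smoothing with reflection padding,
    # mirroring the np.r_ + np.convolve(mode='valid') pattern in the notebook.
    arr = np.asarray(arr, dtype=float)
    if window_len <= 1:
        return arr  # nothing to smooth; avoids the empty [0:-0] slice
    pad = window_len // 2
    padded = np.r_[arr[window_len - 1:0:-1], arr, arr[-2:-window_len - 1:-1]]
    w = np.ones(window_len)
    out = np.convolve(w / w.sum(), padded, mode='valid')
    return out[pad:-pad]

noisy = np.array([10., 12., 9., 40., 11., 10., 13., 9.])
print(smooth_flat(noisy, 7))   # 7-frame flat window, as used for the bbox arrays
print(smooth_flat(noisy, 1))   # a window of 1 leaves the signal untouched

The video cell keeps the old cv2.cv constants around as comments; a version-tolerant way to read the frame size is to try the cv2 >= 3 names and fall back to the 2.x ones. This is a sketch under the assumption that one of the two attribute sets exists in the installed OpenCV.

import cv2

def frame_size(video):
    # cv2 >= 3 exposes CAP_PROP_* directly; cv2 2.x used cv2.cv.CV_CAP_PROP_*.
    try:
        w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    except AttributeError:
        w = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        h = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
    return w, h
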
--
Gitblit v1.8.0