| | |
| | | "cells": [ |
| | | { |
| | | "cell_type": "code", |
| | | "execution_count": 6,
| | | "metadata": { |
| | | "collapsed": true |
| | | }, |
| | |
| | | }, |
| | | { |
| | | "cell_type": "code", |
| | | "execution_count": 7,
| | | "metadata": { |
| | | "collapsed": true |
| | | }, |
| | |
| | | }, |
| | | { |
| | | "cell_type": "code", |
| | | "execution_count": 8,
| | | "metadata": { |
| | | "collapsed": false |
| | | }, |
| | | "outputs": [ |
| | | { |
| | | "name": "stdout", |
| | | "output_type": "stream", |
| | | "text": [
| | | "Done\n"
| | | ]
| | | },
| | | {
| | | "ename": "KeyboardInterrupt",
| | | "evalue": "", |
| | | "output_type": "error", |
| | | "traceback": [ |
| | | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", |
| | | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", |
| | | "\u001b[0;32m<ipython-input-8-1f2606c2a679>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0misfile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_path\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;31m#image = cv2.imread(input_path, 0) #load in grayscale\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;31m#Image dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", |
| | | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " |
| | | ] |
| | | } |
| | | ], |
| | |
| | | " face_h = row[8]\n", |
| | | "\n", |
| | | " #Error correction\n", |
| | | " k = 0.15\n", |
| | | " x_min = face_x - image_w * k\n", |
| | | " x_max = face_x + image_w * (k+1)\n", |
| | | " y_min = face_y - image_h * k\n", |
| | | " y_max = face_y + image_h * (k+1)\n", |
| | | " k = 0.35\n", |
| | | " x_min = face_x - face_w * k * 0.6\n", |
| | | " x_max = face_x + face_w + face_w * k * 0.6\n", |
| | | " y_min = face_y - face_h * k * 2\n", |
| | | " y_max = face_y + face_h + face_h * k * 0.6\n", |
| | | " \n", |
| | | " x_min = int(max(0, x_min))\n", |
| | | " x_max = int(min(image_w, x_max))\n", |
| | | " y_min = int(max(0, y_min))\n", |
| | | " y_max = int(min(image_h, y_max))\n", |
| | | "\n", |
| | | " if(face_w > image_w): \n", |
| | | " face_w = image_w\n", |
| | | " face_h = image_w\n", |
| | | " if(face_h > image_h): \n", |
| | | " face_h = image_h\n", |
| | | " face_w = image_h\n", |
| | | " \n", |
| | | " #Crop the face from the image\n", |
| | | " image_cropped = np.copy(image[y_min:y_max, x_min:x_max])\n", |
| | | " #Uncomment the lines below if you want to rescale the image to a particular size\n", |
| | | "    to_size = 240\n",
| | | " image_cropped = cv2.resize(image_cropped, (to_size,to_size), interpolation = cv2.INTER_AREA)\n", |
| | | " #Uncomment the line below if you want to use adaptive histogram normalisation\n", |
| | | " #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(5,5))\n", |
| | |
| | | "cell_type": "code", |
| | | "execution_count": null, |
| | | "metadata": { |
| | | "collapsed": false |
| | | }, |
| | | "outputs": [], |
| | | "source": [ |
| | | "print 'test'" |
| | | ] |
| | | }, |
| | | { |
| | | "cell_type": "code", |
| | | "execution_count": null, |
| | | "metadata": { |
| | | "collapsed": true |
| | | }, |
| | | "outputs": [], |