xingzilong
2017-08-18 9e5babf9db52e64bdae60137be7696e56241fca6
RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1 +1,147 @@
#include "PL_SensetimeFaceDetect.h"
#include "MaterialBuffer.h"
#include "logger.h"
//#include "SensetimeFaceAPIWrapper/src/FaceDBPool.h"
//#include "SensetimeFaceAPIWrapper/src/faceAPI.h"
#include <opencv2/opencv.hpp>
#include <cv_face.h>
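// Opaque internal state shared by init/pay/gain; stored behind the element's void* internal pointer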
struct PL_SensetimeFaceDetect_Internal
{
   bool payError;
   PL_SensetimeFaceDetectConfig config;
   SensetimeFaceDetectResult lastResult;
   PL_SensetimeFaceDetect_Internal() :
      payError(true), config(), lastResult()
   {
   }
   ~PL_SensetimeFaceDetect_Internal()
   {
   }
   void reset()
   {
      payError = true;
      PL_SensetimeFaceDetectConfig _config;
      config = _config;
      SensetimeFaceDetectResult _lastResult;
      lastResult = _lastResult;
   }
};
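// Factory function used by the pipeline to create this element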
PipeLineElem* create_PL_SensetimeFaceDetect()
{
   return new PL_SensetimeFaceDetect;
}
PL_SensetimeFaceDetect::PL_SensetimeFaceDetect() : internal(new PL_SensetimeFaceDetect_Internal)
{
}
PL_SensetimeFaceDetect::~PL_SensetimeFaceDetect()
{
   delete (PL_SensetimeFaceDetect_Internal*)internal;
   internal = nullptr;
}
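// init(): reset internal state and, if args is provided, copy the caller's PL_SensetimeFaceDetectConfig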
bool PL_SensetimeFaceDetect::init(void* args)
{
   PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
   in->reset();
   if (args != nullptr)
   {
      PL_SensetimeFaceDetectConfig* config = (PL_SensetimeFaceDetectConfig*)args;
      in->config = *config;
   }
   return true;
}
void PL_SensetimeFaceDetect::finit()
{
   // nothing to release here yet; the internal state is freed in the destructor
}
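// pay(): consume an upstream SensetimeFaceDetectDbFrame (YUV420) and run detection;
// the SenseTime feature extraction / DB search calls are currently commented out,
// so only school_id is copied into the result.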
bool PL_SensetimeFaceDetect::pay(const PipeMaterial& pm)
{
   PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
   in->payError = true;
   {
      SensetimeFaceDetectResult _lastResult;
      in->lastResult = _lastResult;
   }
   if (pm.type != PipeMaterial::PMT_FRAME || !(in->config.payWithDbFrame))
   {
      LOG_ERROR << "Only support PMT_FRAME (SensetimeFaceDetectDbFrame)" << std::endl;
      return false;
   }
   if (pm.buffer == nullptr)
      return false;
   SensetimeFaceDetectDbFrame* frame = (SensetimeFaceDetectDbFrame*)pm.buffer;
   if (frame->type != MB_Frame::MBFT_YUV420)
   {
      LOG_ERROR << "Only support MBFT_YUV420" << std::endl;
      return false;
   }
   //FaceDB* _faceDB = (FaceDB*)frame->_faceDB;
   //if (_faceDB == nullptr)
   //{
   //   LOG_ERROR << "FaceDB is null" << std::endl;
   //   return false;
   //}
   // YUV420 carries 1.5 bytes per pixel
   const size_t expectedYUVSize = frame->width * frame->height * 3 / 2;
   if (frame->buffSize < expectedYUVSize)
   {
      LOG_WARN << "image buffer smaller than expected YUV420 size" << std::endl;
      return false;
   }
   // wrap the YUV420 planar buffer (height*3/2 rows, 1 byte per sample) without copying
   cv::Mat yuvMat(cv::Size(frame->width, frame->height * 3 / 2), CV_8UC1, frame->buffer);
   //cv_feature_t* feature = _faceDB->extract_feature(yuvMat);
   //in->lastResult.st_id = _faceDB->search_db(feature);
   //in->lastResult.st_id = _faceDB->do_reasch(yuvMat);
   //#todo release feature
   in->lastResult.school_id = frame->school_id;
   in->payError = false;
   return true;
}
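// gain(): expose the last detection result to the downstream element as a raw PMT_BYTES struct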
bool PL_SensetimeFaceDetect::gain(PipeMaterial& pm)
{
   PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
   pm.former = this;
   if (in->payError)
   {
      return false;
   }
   if (! in->config.resultStructOnly)
   {
      LOG_ERROR << "Only support resultStructOnly" << std::endl;
      return false;
   }
   pm.type = PipeMaterial::PMT_BYTES;
   pm.buffer = &(in->lastResult);
   pm.buffSize = sizeof(SensetimeFaceDetectResult);
   return true;
}