| | |
| | | #include <PL_ColorConv.h>
|
| | | #include <PL_AndroidMediaCodecEncoder.h>
|
| | | #include <PL_RTSPServer2.h>
|
| | | #include <iostream>
|
| | |
|
| | | // NOTE(review): this region appears to be corrupted merge/diff residue — the
| | | // "| | |" prefixes look like diff-view artifacts, and the body below fuses
| | | // fragments of at least three different functions (a global element-creator
| | | // registration routine, a render-pipeline init routine returning bool, and a
| | | // face-detection RPC callback) into one destructor. The code tokens are left
| | | // untouched; the comments flag the concrete problems. TODO: recover the
| | | // original functions from version-control history.
| | | CameraWrapper::~CameraWrapper()
|
| | | {
|
| | | stop();
|
| | |
| | | // NOTE(review): registering global pipeline element creators inside a
| | | // destructor is almost certainly misplaced merge residue — this block
| | | // presumably belongs to a one-time init/registration function; confirm.
| | | PipeLine::register_global_elem_creator("PL_RTSPClient", create_PL_RTSPClient);
|
| | | PipeLine::register_global_elem_creator("PL_AndroidMediaCodecDecoder", create_PL_AndroidMediaCodecDecoder);
|
| | | PipeLine::register_global_elem_creator("PL_AndroidSurfaceViewRender", create_PL_AndroidSurfaceViewRender);
|
| | | PipeLine::register_global_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
|
| | | //PipeLine::register_global_elem_creator("PL_SensetimeFaceTrackMitiTrd", create_PL_SensetimeFaceTrackMultiTrd);
|
| | | //PipeLine::register_global_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack);
|
| | | PipeLine::register_global_elem_creator("PL_SensetimeFaceTrackMitiTrd", create_PL_SensetimeFaceTrackMultiTrd);
|
| | | PipeLine::register_global_elem_creator("PL_Gainer", create_PL_Gainer);
|
| | | PipeLine::register_global_elem_creator("PL_Scale", create_PL_Scale);
|
| | | PipeLine::register_global_elem_creator("PL_ColorConv", create_PL_ColorConv);
|
| | |
| | | // NOTE(review): the commented-out "return false;" and "}" below suggest the
| | | // original end of a bool-returning function was lost here during the merge.
| | | // return false;
|
| | | //}
|
| | |
|
| | | // NOTE(review): from here down looks like a fragment of a render-pipeline
| | | // init function — `ret`, `pipeLineRender`, `fontPath` and `plplContext` are
| | | // not declared anywhere in this scope, so this cannot compile as-is.
| | | PL_Paint_Config plPaintCfg;
|
| | | plPaintCfg.fontPath = fontPath;
|
| | | plPaintCfg.plplCtx = &plplContext;
|
| | | PL_Paint* plPaint = (PL_Paint*)pipeLineRender->push_elem("PL_Paint");
|
| | | ret = plPaint->init(&plPaintCfg);
|
| | | if (!ret)
|
| | | {
|
| | | LOG_ERROR << "pipeLineRender.plPaint init error" << LOG_ENDL;
|
| | | return false;
|
| | | }
|
| | | // PL_Paint_Config plPaintCfg;
|
| | | // plPaintCfg.fontPath = fontPath;
|
| | | // plPaintCfg.plplCtx = &plplContext;
|
| | | // PL_Paint* plPaint = (PL_Paint*)pipeLineRender->push_elem("PL_Paint");
|
| | | // ret = plPaint->init(&plPaintCfg);
|
| | | // if (!ret)
|
| | | // {
|
| | | // LOG_ERROR << "pipeLineRender.plPaint init error" << LOG_ENDL;
|
| | | // return false;
|
| | | // }
|
| | |
|
| | | //PL_AndroidSurfaceViewRender* asvRender = (PL_AndroidSurfaceViewRender*)pipeLineRender->push_elem("PL_AndroidSurfaceViewRender");
|
| | | //ret = asvRender->init(&asvrConfig);
|
| | |
| | | //}
|
| | |
|
| | | #ifdef USE_ST_SDK
|
| | | // NOTE(review): `sfTrack` is defined twice in this scope — here as
| | | // PL_SensetimeFaceTrack* and again below as PL_SensetimeFaceTrackMultiTrd*.
| | | // That is a redefinition error; only one of the two variants was meant to
| | | // survive the merge. Confirm which before fixing.
| | | PL_SensetimeFaceTrack *sfTrack = (PL_SensetimeFaceTrack *) pipeLineDecoderDetector->push_elem("PL_SensetimeFaceTrack");//#todo use multi
|
| | | // PL_SensetimeFaceTrack *sfTrack = (PL_SensetimeFaceTrack *) pipeLineDecoderDetector->push_elem("PL_SensetimeFaceTrack");//#todo use multi
|
| | | // ret = sfTrack->init(&sftConfig);
|
| | | // if (!ret)
|
| | | // {
|
| | | // LOG_ERROR << "pipeLineDecoderDetector.sfTrack.init error" << LOG_ENDL;
|
| | | // return false;
|
| | | // }
|
| | | // NOTE(review): redefinition of `sfTrack` — see note above.
| | | PL_SensetimeFaceTrackMultiTrd *sfTrack = (PL_SensetimeFaceTrackMultiTrd *) pipeLineDecoderDetector->push_elem("PL_SensetimeFaceTrackMitiTrd");
|
| | | ret = sfTrack->init(&sftConfig);
|
| | | if (!ret)
|
| | | {
|
| | | LOG_ERROR << "pipeLineDecoderDetector.sfTrack.init error" << LOG_ENDL;
|
| | | return false;
|
| | | }
|
| | | //PL_SensetimeFaceTrackMultiTrd *sfTrack = (PL_SensetimeFaceTrackMultiTrd *) pipeLineDecoderDetector->push_elem("PL_SensetimeFaceTrackMitiTrd");
|
| | | //ret = sfTrack->init(&sftConfig);
|
| | | //if (!ret)
|
| | | //{
|
| | | // LOG_ERROR << "pipeLineDecoderDetector.sfTrack.init error" << LOG_ENDL;
|
| | | // return false;
|
| | | //}
|
| | | #endif
|
| | |
|
| | | // NOTE(review): `return ret;` here, followed by an unreachable
| | | // `return false;` and then MORE statements, shows two functions were fused.
| | | // A destructor cannot return a value at all — every `return <expr>` in this
| | | // body is ill-formed.
| | | return ret;
|
| | |
| | | return false;
|
| | | // NOTE(review): from here down looks like a fragment of a face-detection
| | | // callback (it reads a `pm` packet and a `cameraWrapper` object, neither of
| | | // which is declared in this scope).
| | | cameraWrapper.faceCache.cachePm(*pm);
|
| | | //remote call start: to keep this generic, the following steps were not encapsulated inside RtspFaceDetectClient
|
| | | RtspFaceDetectClient* client = getRtspFaceDetectClient();
|
| | | if (client == nullptr)
|
| | | return false;
|
| | | //#todo clean up / encapsulate this better
|
| | | try
|
| | | {
|
| | | LOG_INFO <<"try start"<< LOG_ENDL;
|
| | |
|
| | | // NOTE(review): `request` and `sendAct` are defined here AND again below
| | | // inside the same try block — redefinition errors. One of the two RPC
| | | // sequences (this one, or the getEzRpcClient-based one below) must go.
| | | auto request = client->fireFaceCountListenerRequest();
|
| | | request.setCameraIndex(cameraWrapper.cameraIdx);
|
| | | request.setFaceCount(cameraWrapper.faceCache.getFaceCount(*pm));
|
| | | auto sendAct = request.send();
|
| | | sendAct.wait(client->getWaitScope());
|
| | | // RtspFaceDetect::Client* rClient = getRtspFaceDetectClient()->getClient();
|
| | | // auto& waitScope = getRtspFaceDetectClient()->getWaitScope();
|
| | |
|
| | | // NOTE(review): redefinition of `client` (already declared before the try
| | | // block) — another compile error from the merged duplicate sequence.
| | | RtspFaceDetectClient* client = getRtspFaceDetectClient();
|
| | | RtspFaceDetect::Client rClient = client->getEzRpcClient()->getMain<RtspFaceDetect>();
|
| | | auto& waitScope = client->getWaitScope();
|
| | |
|
| | | auto request = rClient.fireFaceCountListenerRequest();
|
| | | request.setCameraIndex(cameraWrapper.cameraIdx);
|
| | | request.setFaceCount(cameraWrapper.faceCache.getFaceCount(*pm));
|
| | | LOG_INFO <<cameraWrapper.cameraIdx << "+" << cameraWrapper.faceCache.getFaceCount(*pm)<< LOG_ENDL;
|
| | | LOG_INFO <<"call client : i have face!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"<< LOG_ENDL;
|
| | | auto sendAct = request.send();
|
| | | sendAct.ignoreResult().wait(waitScope);
|
| | | }
|
| | | catch (const kj::Exception& e)
|
| | | {
|
| | | LOG_INFO <<"catch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"<< LOG_ENDL;
|
| | | LOG_ERROR << "catch!!!" <<e.getDescription().cStr() << LOG_ENDL;
|
| | | std::cout << e.getDescription().cStr() << std::endl;
|
| | | return false;
|
| | | }
|
| | | // NOTE(review): catching std::exception BY VALUE slices derived exception
| | | // types — should be `catch (const std::exception& e)`. Also note this
| | | // handler logs and then falls through to `return true`, silently swallowing
| | | // the failure, unlike the kj::Exception handler above which returns false.
| | | catch (std::exception e){
|
| | | LOG_ERROR << "catch!!!" <<e.what() << LOG_ENDL;
|
| | | }
|
| | | //remote call end
|
| | | return true;
|
| | | }
|