From 017b7eb736ccc53c52f81486be8196d7fafc3289 Mon Sep 17 00:00:00 2001
From: houxiao <houxiao@454eff88-639b-444f-9e54-f578c98de674>
Date: Wed, 28 Dec 2016 10:31:42 +0800
Subject: [PATCH] add MaterialBuffer
---
RtspFace/PL_RTSPServer.cpp | 12 +
RtspFace/PipeLine.h | 25 +-
RtspFace/PL_H264Decoder.cpp | 35 ++++
RtspFace/main.cpp | 28 ++--
RtspFace/PL_H264Encoder.cpp | 113 ++++++++++++---
RtspFace/live555/testProgs/testRTSPClient.hpp | 2
RtspFace/MaterialBuffer.h | 35 +++++
RtspFace/PL_Queue.cpp | 2
RtspFace/PL_RTSPClient.cpp | 27 ++-
RtspFace/PL_RTSPClient.h | 2
RtspFace/PL_AVFrameYUV420.cpp | 65 +++++----
RtspFace/PL_SensetimeFaceDetect.cpp | 32 ++++
12 files changed, 276 insertions(+), 102 deletions(-)
diff --git a/RtspFace/MaterialBuffer.h b/RtspFace/MaterialBuffer.h
new file mode 100644
index 0000000..399b637
--- /dev/null
+++ b/RtspFace/MaterialBuffer.h
@@ -0,0 +1,37 @@
+#ifndef _MATERIAL_BUFFER_H_
+#define _MATERIAL_BUFFER_H_
+
+#include <cstdint> // uint8_t
+#include <cstddef> // size_t
+
+#if defined(WIN32) || defined(_MSC_VER)
+ struct timeval {
+ time_t tv_sec; /* seconds */
+		long tv_usec;   /* microseconds (suseconds_t is not defined on WIN32/MSVC) */
+ };
+#else
+ #include <sys/time.h>
+#endif
+
+struct MB_Frame
+{
+ enum MBFType
+ {
+ MBFT__FIRST,
+ MBFT_JPEG,
+ MBFT_YUV420,
+ MBFT_BGRA,
+ MBFT_H264_NALU,
+ MBFT_H264_NALU_WITH_AUX,
+ MBFT_PTR_AVFRAME,
+ MBFT__LAST
+ };
+
+ MBFType type;
+ uint8_t* buffer;//#todo void*
+ size_t buffSize;
+ timeval pts;
+
+ MB_Frame() : type(MBFT__FIRST), buffer(nullptr), buffSize(0), pts() { }
+};
+
+#endif
diff --git a/RtspFace/PL_AVFrameYUV420.cpp b/RtspFace/PL_AVFrameYUV420.cpp
index fc85e4c..5462ba9 100644
--- a/RtspFace/PL_AVFrameYUV420.cpp
+++ b/RtspFace/PL_AVFrameYUV420.cpp
@@ -1,4 +1,5 @@
#include "PL_AVFrameYUV420.h"
+#include "MaterialBuffer.h"
extern "C"
{
@@ -12,9 +13,10 @@
uint8_t buffer[1920*1080*3];
size_t buffSize;
size_t buffSizeMax;
+ MB_Frame lastFrame;
AVFrameYUV420_Internal() :
- buffSize(0), buffSizeMax(sizeof(buffer))
+ buffSize(0), buffSizeMax(sizeof(buffer)), lastFrame()
{
}
@@ -25,6 +27,9 @@
void reset()
{
buffSize = 0;
+
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
}
};
@@ -61,7 +66,23 @@
{
AVFrameYUV420_Internal* in = (AVFrameYUV420_Internal*)internal;
- AVFrame* pAVFrame = (AVFrame*)pm.buffer;
+ if (pm.type != PipeMaterial::PMT_FRAME)
+ {
+		printf("PL_AVFrameYUV420::pay only support PMT_FRAME\n");
+ return false;
+ }
+
+ if (pm.buffer == nullptr)
+ return false;
+
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+ if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
+ {
+		printf("PL_AVFrameYUV420::pay only support MBFT_PTR_AVFRAME\n");
+ return false;
+ }
+
+ AVFrame* pAVFrame = (AVFrame*)frame->buffer;
if (pAVFrame == nullptr)
return false;
@@ -83,34 +104,21 @@
pBuff += height * width / 4;
in->buffSize = pBuff - in->buffer;
-
- // write yuv420
- //int a=0;
- //for (int i = 0; i < height; i++)
- //{
- // memcpy(in->buffer + a, pAVFrame->data[0] + i * pAVFrame->linesize[0], width);
- // a += width;
- //}
- //for (int i=0; i<height/2; i++)
- //{
- // memcpy(in->buffer + a, pAVFrame->data[1] + i * pAVFrame->linesize[1], width / 2);
- // a += width / 2;//#todo 4
- //}
- //for (int i=0; i<height/2; i++)
- //{
- // memcpy(in->buffer + a, pAVFrame->data[2] + i * pAVFrame->linesize[2], width / 2);
- // a += width / 2;
- //}
//in->buffer readly
+
+ in->lastFrame.type = MB_Frame::MBFT_YUV420;
+ in->lastFrame.buffer = in->buffer;
+ in->lastFrame.buffSize = in->buffSize;
+ in->lastFrame.pts = frame->pts;
//#test
- //static size_t f=0;
- //char fname[50];
- //sprintf(fname, "%u.yuv420", ++f);
- //FILE * pFile = fopen (fname,"wb");
- //fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
- //fclose(pFile);
+ static size_t f=0;
+ char fname[50];
+	sprintf(fname, "%zu.yuv420", ++f);
+ FILE * pFile = fopen (fname,"wb");
+ fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
+ fclose(pFile);
return true;
}
@@ -119,8 +127,9 @@
{
AVFrameYUV420_Internal* in = (AVFrameYUV420_Internal*)internal;
- pm.buffer = in->buffer;
- pm.buffSize = in->buffSize;
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = (uint8_t*)(&(in->lastFrame));
+ pm.buffSize = sizeof(in->lastFrame);
pm.former = this;
return true;
}
diff --git a/RtspFace/PL_H264Decoder.cpp b/RtspFace/PL_H264Decoder.cpp
index 879fa6f..7a3b705 100644
--- a/RtspFace/PL_H264Decoder.cpp
+++ b/RtspFace/PL_H264Decoder.cpp
@@ -1,4 +1,5 @@
#include "PL_H264Decoder.h"
+#include "MaterialBuffer.h"
#include <H264VideoRTPSource.hh> // for SPropRecord
#include <libbase64.h>
@@ -20,12 +21,14 @@
AVCodecContext* pAVCodecContext;
AVFrame* pAVFrame;//#todo delete
+ MB_Frame lastFrame;
H264Decoder_Internal() :
//buffSize(0), buffSizeMax(sizeof(buffer)),
fmtp_set_to_context(false),
payError(true),
- pAVCodecContext(nullptr), pAVFrame(nullptr)
+ pAVCodecContext(nullptr), pAVFrame(nullptr),
+ lastFrame()
{
}
@@ -38,6 +41,9 @@
//buffSize = 0;
fmtp_set_to_context = false;
payError = true;
+
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
}
};
@@ -211,7 +217,27 @@
in->fmtp_set_to_context = true;
}
- bool ret = decodeH264(in, pm.buffer, pm.buffSize);
+ if (pm.buffer == nullptr || pm.buffSize <= 0)
+ return false;
+
+ bool ret = false;
+ if (pm.type == PipeMaterial::PMT_BYTES)
+ ret = decodeH264(in, pm.buffer, pm.buffSize);
+ else if (pm.type == PipeMaterial::PMT_FRAME)
+ {
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+
+ ret = decodeH264(in, frame->buffer, frame->buffSize);
+ if (ret)
+ {
+ in->lastFrame.type = MB_Frame::MBFT_PTR_AVFRAME;
+ in->lastFrame.buffer = (uint8_t*)(in->pAVFrame);
+ in->lastFrame.buffSize = sizeof(in->pAVFrame);
+ //in->lastFrame.pts = frame->pts;//#todo
+ gettimeofday(&(in->lastFrame.pts),NULL);
+ }
+ }
+
in->payError = !ret;
return ret;
}
@@ -222,8 +248,9 @@
if (!in->payError)
{
- pm.buffer = (uint8_t*)in->pAVFrame;//in->buffer;
- pm.buffSize = sizeof(uint8_t*);//in->buffSize;
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = (uint8_t*)(&(in->lastFrame));
+		pm.buffSize = sizeof(in->lastFrame);
}
pm.former = this;
return !in->payError;
diff --git a/RtspFace/PL_H264Encoder.cpp b/RtspFace/PL_H264Encoder.cpp
index e2fcdf2..5547256 100644
--- a/RtspFace/PL_H264Encoder.cpp
+++ b/RtspFace/PL_H264Encoder.cpp
@@ -1,11 +1,14 @@
#include "PL_H264Encoder.h"
+#include "MaterialBuffer.h"
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavformat/avformat.h>
- #include "libavutil/imgutils.h"
+ #include <libavutil/imgutils.h>
+
+ #include <libyuv.h>
}
struct H264Encoder_Internal
@@ -16,6 +19,7 @@
bool payError;
bool ffmpegInited;
size_t frameCount;
+ MB_Frame lastFrame;
AVCodecContext* pAVCodecContext;
AVFrame* pAVFrame;//#todo delete
@@ -25,8 +29,8 @@
H264Encoder_Internal() :
buffSize(0), buffSizeMax(sizeof(buffer)),
payError(true), ffmpegInited(false), frameCount(0),
- pAVCodecContext(nullptr), pAVFrame(nullptr), pAVStream(nullptr), pAVFormatContext(nullptr)
-
+ pAVCodecContext(nullptr), pAVFrame(nullptr), pAVStream(nullptr), pAVFormatContext(nullptr),
+ lastFrame()
{
}
@@ -40,6 +44,9 @@
payError = true;
ffmpegInited = false;
frameCount = 0;
+
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
pAVCodecContext = nullptr;
pAVFrame = nullptr;
@@ -93,11 +100,11 @@
in->pAVCodecContext = avcodec_alloc_context3(avCodec);
in->pAVCodecContext->bit_rate = 1*1024*1024*8; // 3MB
- in->pAVCodecContext->width = 1920;
- in->pAVCodecContext->height = 1080;//#todo from config
+ in->pAVCodecContext->width = 800;//#todo test
+ in->pAVCodecContext->height = 600;//#todo from config
in->pAVCodecContext->time_base.num=1;
in->pAVCodecContext->time_base.den=25;
- in->pAVCodecContext->gop_size = 20;
+ in->pAVCodecContext->gop_size = 25;
in->pAVCodecContext->max_b_frames = 0;
in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -144,11 +151,39 @@
return true;
}
+#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+
void copyAVFrame(AVFrame* dest, AVFrame* src)
{
- dest->data[0] = src->data[0];
- dest->data[1] = src->data[1];
- dest->data[2] = src->data[2];
+int src_width = src->width;
+int src_height = src->height;
+int dst_width = dest->width;
+int dst_height = dest->height;
+printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
+
+libyuv::I420Scale(src->data[0], src->linesize[0],
+                  src->data[1], src->linesize[1],
+                  src->data[2], src->linesize[2],
+                  src_width, src_height,
+                  dest->data[0], dest->linesize[0],
+                  dest->data[1], dest->linesize[1],
+                  dest->data[2], dest->linesize[2],
+                  dst_width, dst_height,
+                  libyuv::kFilterNone );
+
+ //#test
+ //static size_t f=0;
+ //char fname[50];
+ //sprintf(fname, "%u.yuv420", ++f);
+ //FILE * pFile = fopen (fname,"wb");
+ //fwrite (dest->data[0] , sizeof(char), dst_width * dst_height, pFile);
+ //fwrite (dest->data[1] , sizeof(char), dst_width * dst_height / 4, pFile);
+ //fwrite (dest->data[2] , sizeof(char), dst_width * dst_height / 4, pFile);
+ //fclose(pFile);
+
+ //dest->data[0] = src->data[0];
+ //dest->data[1] = src->data[1];
+ //dest->data[2] = src->data[2];
//int height = dest->height;
//int width = dest->width;
@@ -158,15 +193,19 @@
//memcpy(dest->data[2], src->data[2], height * width / 4); // V
}
-bool encodeH264(H264Encoder_Internal* in, AVFrame* pAVFrame, size_t buffSize)
+bool encodeH264(H264Encoder_Internal* in, AVFrame* pAVFrame, timeval pts)
{
in->buffSize = 0;
- in->frameCount++;
copyAVFrame(in->pAVFrame, pAVFrame);
//in->pAVFrame->pts = (1.0 / 25) * 90000 * in->frameCount;
- in->pAVFrame->pts = time(nullptr);
+ //in->pAVFrame->pts = time(nullptr);
+ //in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000 + in->frameCount;
+ //in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000 + ((1.0 / 25) * 90000 * in->frameCount);
+ in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000;
+ //in->pAVFrame->pts = pAVFrame->pkt_pts;
+ //in->pAVFrame->pts = (1.0 / 25) * 90000 * in->frameCount;
AVPacket pAVPacket = {0};
av_init_packet(&pAVPacket);
@@ -183,6 +222,7 @@
if (gotPacket > 0)
{
+ in->frameCount++;
printf("Succeed to encode (1) frame=%d, size=%d\n", in->frameCount, pAVPacket.size);
memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
in->buffSize = pAVPacket.size;
@@ -190,7 +230,7 @@
}
//#todo finit
- //Flush Encoder
+ //Flush Encoder, when stop encoder
//while (gotPacket > 0)
//{
// ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, NULL, &gotPacket);
@@ -210,12 +250,12 @@
//#test
- if (in->buffSize > 0)
- {
- static FILE * pFile = fopen("out.h264","wba+");
- fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
- fflush(pFile);
- }
+ //if (in->buffSize > 0)
+ //{
+ // static FILE * pFile = fopen("out.h264","wba+");
+ // fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
+ // fflush(pFile);
+ //}
in->payError = (in->buffSize == 0);
return !(in->payError);
@@ -232,15 +272,40 @@
bool ret = initH264EncoderEnv(in);
if (!ret)
{
- printf("initH264EncoderEnv error");
+ printf("initH264EncoderEnv error\n");
return false;
}
else
in->ffmpegInited = true;
}
- bool ret = encodeH264(in, (AVFrame*)pm.buffer, pm.buffSize);
+ if (pm.type != PipeMaterial::PMT_FRAME)
+ {
+ printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+ return false;
+ }
+
+ if (pm.buffer == nullptr)
+ return false;
+
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+ if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
+ {
+ printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME\n");
+ return false;
+ }
+
+ bool ret = encodeH264(in, (AVFrame*)(frame->buffer), frame->pts);
in->payError = !ret;
+
+ if (ret)
+ {
+ in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
+ in->lastFrame.buffer = in->buffer;
+ in->lastFrame.buffSize = in->buffSize;
+ in->lastFrame.pts = frame->pts;
+ }
+
return ret;
}
@@ -250,8 +315,10 @@
if (!in->payError)
{
- pm.buffer = in->buffer;
- pm.buffSize = in->buffSize;
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = (uint8_t*)(&(in->lastFrame));
+ pm.buffSize = sizeof(in->lastFrame);
+ pm.former = this;
}
pm.former = this;
return !in->payError;
diff --git a/RtspFace/PL_Queue.cpp b/RtspFace/PL_Queue.cpp
index 4641eec..7965877 100644
--- a/RtspFace/PL_Queue.cpp
+++ b/RtspFace/PL_Queue.cpp
@@ -350,7 +350,7 @@
if (in->config.syncQueueFull)
pthread_mutex_unlock(in->sync_full_mutex);
- pm.type = PMT_BYTES;
+ pm.type = PipeMaterial::PMT_BYTES;
pm.buffer = qb->data;
pm.buffSize = qb->size;
pm.former = this;
diff --git a/RtspFace/PL_RTSPClient.cpp b/RtspFace/PL_RTSPClient.cpp
index 41dad6a..71869db 100644
--- a/RtspFace/PL_RTSPClient.cpp
+++ b/RtspFace/PL_RTSPClient.cpp
@@ -1,9 +1,10 @@
#include "PL_RTSPClient.h"
+#include "MaterialBuffer.h"
#include <pthread.h>
void rtsp_client_sdp_callback(void* arg, const char* val);
void rtsp_client_fmtp_callback(void* arg, const char* val);
-void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize);
+void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize, timeval presentationTime);
void rtsp_client_continue_callback(void* arg);
//struct RTSPConfig;
#include "live555/testProgs/testRTSPClient.hpp"
@@ -17,14 +18,13 @@
pthread_mutex_t* frame_mutex;
pthread_mutex_t* continue_mutex;
- uint8_t* lastBuffer;
- size_t lastBuffSize;
+ MB_Frame lastFrame;
RTSPClient_Internal() :
rtspConfig(), live_daemon_thid(0),
eventLoopWatchVariable(0), live_daemon_running(false),
frame_mutex(new pthread_mutex_t), continue_mutex(new pthread_mutex_t),
- lastBuffer(nullptr), lastBuffSize(0)
+ lastFrame()
{
pthread_mutex_init(frame_mutex, NULL);
pthread_mutex_init(continue_mutex, NULL);
@@ -75,8 +75,8 @@
continue_mutex = new pthread_mutex_t;
pthread_mutex_init(continue_mutex, NULL);
- lastBuffer = nullptr;
- lastBuffSize = 0;
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
}
};
@@ -178,8 +178,9 @@
return false;
}
- pm.buffer = in->lastBuffer;
- pm.buffSize = in->lastBuffSize;
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = (uint8_t*)(&(in->lastFrame));
+ pm.buffSize = sizeof(in->lastFrame);
pm.former = this;
return true;
@@ -211,17 +212,19 @@
client->manager->set_global_param(PLGP_RTSP_FMTP, val);
}
-void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize)
+void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize, timeval presentationTime)
{
if (arg == nullptr || buffer == nullptr || buffSize == 0)
return;
PL_RTSPClient* client = (PL_RTSPClient*)arg;
RTSPClient_Internal* in = (RTSPClient_Internal*)(client->internal);
-
- in->lastBuffer = buffer;
- in->lastBuffSize = buffSize;
+ in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
+ in->lastFrame.buffer = buffer;
+ in->lastFrame.buffSize = buffSize;
+ in->lastFrame.pts = presentationTime;
+
int ret = pthread_mutex_unlock(in->frame_mutex);
if(ret != 0)
{
diff --git a/RtspFace/PL_RTSPClient.h b/RtspFace/PL_RTSPClient.h
index e464586..91fa685 100644
--- a/RtspFace/PL_RTSPClient.h
+++ b/RtspFace/PL_RTSPClient.h
@@ -20,7 +20,7 @@
class PL_RTSPClient : public PipeLineElem
{
- friend void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize);
+ friend void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize, timeval presentationTime);
friend void rtsp_client_continue_callback(void* arg);
public:
diff --git a/RtspFace/PL_RTSPServer.cpp b/RtspFace/PL_RTSPServer.cpp
index ad5d1f9..6c6394b 100644
--- a/RtspFace/PL_RTSPServer.cpp
+++ b/RtspFace/PL_RTSPServer.cpp
@@ -1,4 +1,5 @@
#include "PL_RTSPServer.h"
+#include "MaterialBuffer.h"
#include <liveMedia.hh>
#include <BasicUsageEnvironment.hh>
@@ -216,11 +217,18 @@
if (pm.buffer == nullptr || pm.buffSize <= 0)
return false;
+ if (pm.type != PipeMaterial::PMT_FRAME)
+ {
+ printf("PL_RTSPServer::pay only support PMT_FRAME\n");
+ return false;
+ }
+
if (in->buffSize > 0)
printf("PL_RTSPServer::pay may lost data size=%u\n", in->buffSize);
- memcpy(in->buffer, pm.buffer, pm.buffSize);
- in->buffSize = pm.buffSize;
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+ memcpy(in->buffer, frame->buffer, frame->buffSize);
+ in->buffSize = frame->buffSize;
if (in->encoderStub == nullptr)
return false;
diff --git a/RtspFace/PL_SensetimeFaceDetect.cpp b/RtspFace/PL_SensetimeFaceDetect.cpp
index a6fba62..5df118c 100644
--- a/RtspFace/PL_SensetimeFaceDetect.cpp
+++ b/RtspFace/PL_SensetimeFaceDetect.cpp
@@ -1,15 +1,19 @@
#include "PL_SensetimeFaceDetect.h"
+#include <opencv2/opencv.hpp>
+#include <cv_face.h>
+
struct PL_SensetimeFaceDetect_Internal
{
uint8_t buffer[1920*1080*4];
size_t buffSize;
size_t buffSizeMax;
+ MB_Frame lastFrame;
bool payError;
PL_SensetimeFaceDetect_Internal() :
- buffSize(0), buffSizeMax(sizeof(buffer)),
+ buffSize(0), buffSizeMax(sizeof(buffer)), lastFrame(),
payError(true)
{
}
@@ -22,6 +26,9 @@
{
buffSize = 0;
payError = true;
+
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
}
};
@@ -58,6 +65,24 @@
{
PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
+ if (pm.type != PipeMaterial::PMT_FRAME)
+ {
+		printf("PL_SensetimeFaceDetect::pay only support PMT_FRAME\n");
+ return false;
+ }
+
+ if (pm.buffer == nullptr)
+ return false;
+
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+ if (frame->type != MB_Frame::MBFT_YUV420)
+ {
+		printf("PL_SensetimeFaceDetect::pay only support MBFT_YUV420\n");
+ return false;
+ }
+
+
+
//in->buffer readly
//static size_t f=0;
@@ -74,8 +99,9 @@
{
PL_SensetimeFaceDetect_Internal* in = (PL_SensetimeFaceDetect_Internal*)internal;
- pm.buffer = in->buffer;
- pm.buffSize = in->buffSize;
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = (uint8_t*)(&(in->lastFrame));
+ pm.buffSize = sizeof(in->lastFrame);
pm.former = this;
return true;
}
diff --git a/RtspFace/PipeLine.h b/RtspFace/PipeLine.h
index 2a59df7..e690be3 100644
--- a/RtspFace/PipeLine.h
+++ b/RtspFace/PipeLine.h
@@ -12,24 +12,23 @@
class PipeLineElem;
class PipeLine;
-enum PipeMaterialBufferType
-{
- PMT__FIRST,
- PMT_BYTES,
- PMT_TEXT,
- PMT_IMAGE,
- PMT_PM_LIST,
- PMT_PTR_AVFRAME,
- PMT__LAST
-};
-
struct PipeMaterial;
typedef void (* pm_deleter_func)(PipeMaterial* pm);
+// PipeMaterial instance should be unref when pay() finished
struct PipeMaterial
{
- PipeMaterialBufferType type;
- uint8_t* buffer;
+ enum PipeMaterialBufferType
+ {
+ PMT__FIRST,
+ PMT_BYTES, // uint8_t[]
+ PMT_FRAME, // MB_Frame*
+ PMT_PM_LIST,
+ PMT__LAST
+ };
+
+ PipeMaterialBufferType type; // #todo MaterialBuffer merge into there
+ uint8_t* buffer;//#todo void*
size_t buffSize;
PipeLineElem* former;
pm_deleter_func deleter;
diff --git a/RtspFace/live555/testProgs/testRTSPClient.hpp b/RtspFace/live555/testProgs/testRTSPClient.hpp
index f7f5c4f..8f33f1a 100644
--- a/RtspFace/live555/testProgs/testRTSPClient.hpp
+++ b/RtspFace/live555/testProgs/testRTSPClient.hpp
@@ -539,7 +539,7 @@
unsigned s = frameSize;
if (sink->rtspConfig.aux)
s += 4;
- rtsp_client_frame_callback(sink->rtspConfig.args, sink->fReceiveBuffer, s);
+ rtsp_client_frame_callback(sink->rtspConfig.args, sink->fReceiveBuffer, s, presentationTime);
}
sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
diff --git a/RtspFace/main.cpp b/RtspFace/main.cpp
index ef672a1..b75f9ea 100644
--- a/RtspFace/main.cpp
+++ b/RtspFace/main.cpp
@@ -37,8 +37,18 @@
exit(EXIT_FAILURE);
}
}
-
- //{
+
+ {
+ PL_H264Decoder* h264Decoder = (PL_H264Decoder*)pipeLine.push_elem("PL_H264Decoder");
+ h264Decoder->init(nullptr);
+ }
+
+ {
+ PL_AVFrameYUV420* avFrameYUV420 = (PL_AVFrameYUV420*)pipeLine.push_elem("PL_AVFrameYUV420");
+ avFrameYUV420->init(nullptr);
+ }
+
+ //{//#todo queue should support deep copy
// PL_Queue_Config config;
// PL_Queue* queue1 = (PL_Queue*)pipeLine.push_elem("PL_Queue");
// bool ret = queue1->init(&config);
@@ -48,21 +58,11 @@
// exit(EXIT_FAILURE);
// }
//}
-
- {
- PL_H264Decoder* h264Decoder = (PL_H264Decoder*)pipeLine.push_elem("PL_H264Decoder");
- h264Decoder->init(nullptr);
- }
//{
- // PL_AVFrameYUV420* avFrameYUV420 = (PL_AVFrameYUV420*)pipeLine.push_elem("PL_AVFrameYUV420");
- // avFrameYUV420->init(nullptr);
+ // PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
+ // h264Encoder->init(nullptr);
//}
-
- {
- PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");
- h264Encoder->init(nullptr);
- }
//{
// RTSPServerConfig config;
--
Gitblit v1.8.0