Changed files:

RtspFace/PL_AVFrameBGRA.cpp
RtspFace/PL_AVFrameYUV420.cpp
RtspFace/PL_H264Decoder.cpp
RtspFace/PL_H264Decoder.h
RtspFace/PL_H264Encoder.cpp
RtspFace/PL_H264Encoder.h
RtspFace/PL_RTSPClient.cpp
RtspFace/PL_RTSPClient.h
RtspFace/PL_RTSPServer.cpp
RtspFace/PL_SensetimeFaceDetect.cpp
RtspFace/PipeLine.cpp
RtspFace/live555/testProgs/testRTSPClient.hpp
RtspFace/logger.h
RtspFace/main.cpp
RtspFace/PL_AVFrameBGRA.cpp
@@ -13,7 +13,7 @@
 struct PL_AVFrameBGRA_Internal
 {
-    uint8_t buffer[1920*1080*4];//#todo
+    uint8_t buffer[1920*1080*4];//#todo from config
     size_t buffSize;
     size_t buffSizeMax;
     MB_Frame lastFrame;
@@ -69,15 +69,15 @@
 }
 
+#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+
 bool PL_AVFrameBGRA::pay(const PipeMaterial& pm)
 {
-    #define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
     PL_AVFrameBGRA_Internal* in = (PL_AVFrameBGRA_Internal*)internal;
 
     if (pm.type != PipeMaterial::PMT_FRAME)
     {
-        LOG(ERROR) << "PL_AVFrameBGRA::pay only support PMT_FRAME";
+        LOG_ERROR << "Only support PMT_FRAME";
         return false;
     }
@@ -87,7 +87,7 @@
     MB_Frame* frame = (MB_Frame*)pm.buffer;
     if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
     {
-        LOG(ERROR) << "PL_AVFrameBGRA::pay only support MBFT_PTR_AVFRAME";
+        LOG_ERROR << "Only support MBFT_PTR_AVFRAME";
         return false;
     }
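Hoisting SUBSAMPLE to file scope defines it once instead of re-expanding it inside every pay() call. Its round-up division is what keeps YUV420 chroma strides correct when the luma width or height is odd. A minimal standalone sketch (illustrative only, not part of the patch):

    #include <cstdio>

    // Rounds up: SUBSAMPLE(1919, 2) == 960, while plain 1919/2 truncates to 959.
    #define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))

    int main()
    {
        std::printf("chroma stride for width 1919: %d\n", SUBSAMPLE(1919, 2));
        return 0;
    }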
RtspFace/PL_AVFrameYUV420.cpp
@@ -11,7 +11,7 @@
 struct AVFrameYUV420_Internal
 {
-    uint8_t buffer[1920*1080*3];
+    uint8_t buffer[1920*1080*3];//#todo from config
     size_t buffSize;
     size_t buffSizeMax;
     MB_Frame lastFrame;
@@ -69,7 +69,7 @@
     if (pm.type != PipeMaterial::PMT_FRAME)
     {
-        LOG(ERROR) << "PL_AVFrameYUV420::pay only support PMT_FRAME";
+        LOG_ERROR << "Only support PMT_FRAME";
         return false;
     }
@@ -79,7 +79,7 @@
     MB_Frame* frame = (MB_Frame*)pm.buffer;
     if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
     {
-        LOG(ERROR) << "PL_AVFrameYUV420::pay only support MBFT_PTR_AVFRAME";
+        LOG_ERROR << "Only support MBFT_PTR_AVFRAME";
         return false;
     }
RtspFace/PL_H264Decoder.cpp
@@ -22,14 +22,16 @@
     AVCodecContext* pAVCodecContext;
     AVFrame* pAVFrame;//#todo delete
     MB_Frame lastFrame;
+    PL_H264Decoder_Config config;
 
     H264Decoder_Internal() : 
         //buffSize(0), buffSizeMax(sizeof(buffer)), 
         fmtp_set_to_context(false), 
         payError(true), 
         pAVCodecContext(nullptr), pAVFrame(nullptr), 
-        lastFrame()
+        lastFrame(), config()
     {
     }
@@ -43,8 +45,14 @@
         fmtp_set_to_context = false;
         payError = true;
 
         pAVCodecContext = nullptr;
         pAVFrame = nullptr;
+
         MB_Frame _lastFrame;
         lastFrame = _lastFrame;
+
+        PL_H264Decoder_Config _config;
+        config = _config;
     }
 };
@@ -67,6 +75,12 @@
 {
     H264Decoder_Internal* in = (H264Decoder_Internal*)internal;
     in->reset();
+
+    if (args)
+    {
+        PL_H264Decoder_Config* config = (PL_H264Decoder_Config*)args;
+        in->config = *config;
+    }
 
     return true;
 }
@@ -122,19 +136,20 @@
     if (!avCodec)
     {
-        LOG(WARN) << "codec not found!";
+        LOG_WARN << "codec not found!";
         return false;
     }
 
     in->pAVCodecContext = avcodec_alloc_context3(avCodec);
 
-    in->pAVCodecContext->time_base.num = 1;
-    in->pAVCodecContext->frame_number = 1;
     in->pAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
-    in->pAVCodecContext->bit_rate = 0;
-    in->pAVCodecContext->time_base.den = 25;
-    in->pAVCodecContext->width = 1920;//#todo get from pm
-    in->pAVCodecContext->height = 1080;
+
+    // this is only reference for codec
+    //in->pAVCodecContext->frame_number = 1;
+    //in->pAVCodecContext->bit_rate = 0;
+    //in->pAVCodecContext->time_base.num = 1;
+    //in->pAVCodecContext->time_base.den = 25;
+    //in->pAVCodecContext->width = 1920;
+    //in->pAVCodecContext->height = 1080;
 
     if (in->pAVCodecContext->extradata == NULL)
     {
@@ -170,7 +185,7 @@
     if (av_packet_from_data(&packet, buffer, buffSize) != 0)
     {
-        LOG(WARN) << "av_packet_from_data error";
+        LOG_WARN << "av_packet_from_data error";
         return false;
     }
@@ -185,7 +200,7 @@
     }
     else
     {
-        LOG(WARN) << "incomplete frame";
+        LOG_WARN << "incomplete frame";
         return false;
     }
 }
@@ -208,7 +223,10 @@
         size_t numSPropRecords = 0;
         SPropRecord *p_record = parseSPropParameterSets(fmtp.c_str(), numSPropRecords);
         if (numSPropRecords < 2)
-            return false;//#todo log
+        {
+            LOG_WARN << "numSPropRecords < 2";
+            return false;
+        }
 
         SPropRecord &sps = p_record[0];
         SPropRecord &pps = p_record[1];
@@ -216,8 +234,8 @@
         bool ret = initH264DecoderEnv(in, sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength);
         if (!ret)
         {
-            LOG(ERROR) << "PL_H264Decoder::pay initH264DecoderEnv error";
-            return false; // #todo log
+            LOG_ERROR << "initH264DecoderEnv error";
+            return false;
         }
         else
             in->fmtp_set_to_context = true;
@@ -250,8 +268,12 @@
         in->lastFrame.buffSize = sizeof(in->pAVFrame);
         in->lastFrame.width = in->pAVFrame->width;
         in->lastFrame.height = in->pAVFrame->height;
-        //in->lastFrame.pts = frame->pts;//#todo
+
+        if (in->config.resetPTS)
+            gettimeofday(&(in->lastFrame.pts), NULL);
+        else
+            in->lastFrame.pts = frame->pts;
     }
 }
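parseSPropParameterSets() (live555) splits the base64 "sprop-parameter-sets" fmtp attribute into SPS and PPS records, which initH264DecoderEnv() then feeds to the decoder. A hypothetical sketch of packing them as Annex-B extradata for FFmpeg; the helper name and buffer handling are assumptions, not code from this patch:

    #include <cstdint>
    #include <cstring>

    // Hypothetical helper: pack SPS and PPS as Annex-B extradata
    // ({00 00 00 01} start code before each NAL unit), suitable for
    // pAVCodecContext->extradata before avcodec_open2().
    size_t buildAnnexBExtradata(uint8_t* dst,
                                const uint8_t* sps, size_t spsLen,
                                const uint8_t* pps, size_t ppsLen)
    {
        static const uint8_t startCode[4] = {0x00, 0x00, 0x00, 0x01};
        size_t off = 0;
        std::memcpy(dst + off, startCode, 4); off += 4;
        std::memcpy(dst + off, sps, spsLen); off += spsLen;
        std::memcpy(dst + off, startCode, 4); off += 4;
        std::memcpy(dst + off, pps, ppsLen); off += ppsLen;
        return off; // becomes pAVCodecContext->extradata_size
    }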
RtspFace/PL_H264Decoder.h
@@ -3,6 +3,13 @@
 
 #include "PipeLine.h"
 
+struct PL_H264Decoder_Config
+{
+    bool resetPTS;
+
+    PL_H264Decoder_Config() : resetPTS(true)
+    { }
+};
+
 class PL_H264Decoder : public PipeLineElem
 {
 public:
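With the new config struct, callers can opt out of the wall-clock PTS reset. A usage sketch following the push_elem()/init() convention from main.cpp (the element name string and error handling are assumed):

    PL_H264Decoder* h264Decoder = (PL_H264Decoder*)pipeLine.push_elem("PL_H264Decoder");

    PL_H264Decoder_Config decoderConfig;
    decoderConfig.resetPTS = false; // keep the stream's own pts instead of gettimeofday()

    if (!h264Decoder->init(&decoderConfig))
    {
        LOG_ERROR << "h264Decoder.init error";
        exit(EXIT_FAILURE);
    }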
RtspFace/PL_H264Encoder.cpp
@@ -1,5 +1,6 @@
 #include "PL_H264Encoder.h"
 #include "MaterialBuffer.h"
+#include "logger.h"
 
 extern "C"
 {
@@ -12,26 +13,35 @@
     #include <libyuv.h>
 }
 
+PL_H264Encoder_Config::PL_H264Encoder_Config() : 
+    inBufferSize(2*1024*1024), // 2MByte
+    resetPTS(false),
+    bytesBufferImageWidth(0), bytesBufferImageHeight(0),
+    avc_bit_rate(1*1024*1024*8), //1Mbit
+    avc_fps(25), avc_gop(25), avc_max_b_frames(0),
+    avc_profile(FF_PROFILE_H264_MAIN),
+    av_opt_preset("superfast"),
+    av_opt_tune("")
+{
+    // av_opt_tune: zerolatency
+}
+
 struct H264Encoder_Internal
 {
-    uint8_t buffer[1920*1080*3];
+    uint8_t* buffer;
     size_t buffSize;
     size_t buffSizeMax;
     bool payError;
     bool ffmpegInited;
     size_t frameCount;
     MB_Frame lastFrame;
+    PL_H264Encoder_Config config;
 
     AVCodecContext* pAVCodecContext;
     AVFrame* pAVFrame;//#todo delete
     AVStream* pAVStream;
     AVFormatContext* pAVFormatContext;
 
     H264Encoder_Internal() : 
-        buffSize(0), buffSizeMax(sizeof(buffer)), 
-        payError(true), ffmpegInited(false), frameCount(0), 
-        pAVCodecContext(nullptr), pAVFrame(nullptr), 
-        pAVStream(nullptr), pAVFormatContext(nullptr), 
-        lastFrame()
+        buffer(nullptr), buffSize(0), 
+        payError(true), ffmpegInited(false), frameCount(0), 
+        lastFrame(), config(), 
+        pAVCodecContext(nullptr), pAVFrame(nullptr), 
+        pAVFormatContext(nullptr)
     {
     }
@@ -49,10 +59,16 @@
         MB_Frame _lastFrame;
         lastFrame = _lastFrame;
 
+        PL_H264Encoder_Config _config;
+        config = _config;
+
         pAVCodecContext = nullptr;
         pAVFrame = nullptr;
         pAVStream = nullptr;
         pAVFormatContext = nullptr;
+
+        if (buffer != nullptr)
+            delete[] buffer;
+        buffer = new uint8_t[config.inBufferSize];
     }
 };
@@ -76,6 +92,12 @@
     H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
     in->reset();
 
+    if (args != nullptr)
+    {
+        PL_H264Encoder_Config* config = (PL_H264Encoder_Config*)args;
+        in->config = *config;
+    }
+
     return true;
 }
@@ -94,24 +116,26 @@
     if (!avCodec)
     {
-        printf("codec not found!\n");
+        LOG_ERROR << "codec not found!";
         return false;
     }
 
     in->pAVCodecContext = avcodec_alloc_context3(avCodec);
 
-    in->pAVCodecContext->bit_rate = 1*1024*1024*8; // 3MB
-    in->pAVCodecContext->width = 800;//#todo test
-    in->pAVCodecContext->height = 600;//#todo from config
+    in->pAVCodecContext->bit_rate = in->config.avc_bit_rate;
+    in->pAVCodecContext->width = in->config.bytesBufferImageWidth;
+    in->pAVCodecContext->height = in->config.bytesBufferImageHeight;
     in->pAVCodecContext->time_base.num=1;
-    in->pAVCodecContext->time_base.den=25;
-    in->pAVCodecContext->gop_size = 25;
-    in->pAVCodecContext->max_b_frames = 0;
-    //in->pAVCodecContext->profile = FF_PROFILE_H264_MAIN;
+    in->pAVCodecContext->time_base.den = in->config.avc_fps;
+    in->pAVCodecContext->gop_size = in->config.avc_gop;
+    in->pAVCodecContext->max_b_frames = in->config.avc_max_b_frames;
+    in->pAVCodecContext->profile = in->config.avc_profile;
     in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    av_opt_set(in->pAVCodecContext->priv_data, "preset", "superfast", 0);
-    //av_opt_set(in->pAVCodecContext->priv_data, "tune", "zerolatency", 0);
+    if (!in->config.av_opt_preset.empty())
+        av_opt_set(in->pAVCodecContext->priv_data, "preset", in->config.av_opt_preset.c_str(), 0);
+    if (!in->config.av_opt_tune.empty())
+        av_opt_set(in->pAVCodecContext->priv_data, "tune", in->config.av_opt_tune.c_str(), 0);
 
     if(avcodec_open2(in->pAVCodecContext, avCodec, NULL) >= 0)
     {
@@ -121,59 +145,45 @@
         in->pAVFrame->width = in->pAVCodecContext->width;
         in->pAVFrame->height = in->pAVCodecContext->height;
 
         int ret = av_image_alloc(in->pAVFrame->data, in->pAVFrame->linesize, 
                             in->pAVCodecContext->width, in->pAVCodecContext->height, 
                             in->pAVCodecContext->pix_fmt, 16);
         if (ret < 0)
         {
-            printf("av_image_alloc error\n");
+            LOG_ERROR << "av_image_alloc error";
             return false;
         }
     }
     else
     {
-        printf("avcodec_open2 error\n");
+        LOG_ERROR << "avcodec_open2 error";
        return false;
     }
 
-    //int ret = avformat_alloc_output_context2(&(in->pAVFormatContext), NULL, "avi", "");
-    //if (ret < 0 || in->pAVFormatContext == nullptr)
-    //{
-    //    printf("avformat_alloc_output_context2 error\n");
-    //    return false;
-    //}
-    //
-    //in->pAVStream = avformat_new_stream(in->pAVFormatContext, avCodec);
-    //if (in->pAVStream == nullptr)
-    //{
-    //    printf("avformat_new_stream error\n");
-    //    return false;
-    //}
-    //in->pAVStream->id = in->pAVFormatContext->nb_streams-1;
 
     return true;
 }
 
-#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
 void copyAVFrame(AVFrame* dest, AVFrame* src)
 {
-    int src_width = src->width;
-    int src_height = src->height;
-    int dst_width = dest->width;
-    int dst_height = dest->height;
-    printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
-
-    libyuv::I420Scale(src->data[0], src_width,
-                    src->data[1], SUBSAMPLE(src_width, 2),
-                    src->data[2], SUBSAMPLE(src_width, 2),
-                    src_width, src_height,
-                    dest->data[0], dst_width,
-                    dest->data[1], SUBSAMPLE(dst_width, 2),
-                    dest->data[2], SUBSAMPLE(dst_width, 2),
-                    dst_width, dst_height,
-                    libyuv::kFilterNone );
+    //#test
+    //#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+    //int src_width = src->width;
+    //int src_height = src->height;
+    //int dst_width = dest->width;
+    //int dst_height = dest->height;
+    //printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
+    //
+    //libyuv::I420Scale(src->data[0], src_width,
+    //                src->data[1], SUBSAMPLE(src_width, 2),
+    //                src->data[2], SUBSAMPLE(src_width, 2),
+    //                src_width, src_height,
+    //                dest->data[0], dst_width,
+    //                dest->data[1], SUBSAMPLE(dst_width, 2),
+    //                dest->data[2], SUBSAMPLE(dst_width, 2),
+    //                dst_width, dst_height,
+    //                libyuv::kFilterNone );
 
     //static size_t f=0;
     //char fname[50];
     //sprintf(fname, "%u.yuv420", ++f);
@@ -183,13 +193,12 @@
     //fwrite (dest->data[2] , sizeof(char), dst_width * dst_height / 4, pFile);
     //fclose(pFile);
 
-    //dest->data[0] = src->data[0];
-    //dest->data[1] = src->data[1];
-    //dest->data[2] = src->data[2];
+    dest->data[0] = src->data[0];
+    dest->data[1] = src->data[1];
+    dest->data[2] = src->data[2];
 
     //int height = dest->height;
     //int width = dest->width;
     //
     //memcpy(dest->data[0], src->data[0], height * width); // Y
     //memcpy(dest->data[1], src->data[1], height * width / 4); // U
     //memcpy(dest->data[2], src->data[2], height * width / 4); // V
@@ -218,14 +227,14 @@
     int ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, in->pAVFrame, &gotPacket);
     if (ret < 0)
     {
-        printf("avcodec_encode_video2 (1) error=%d\n", ret);
+        LOG_WARN << "avcodec_encode_video2 (1) error=" << ret;
         return false;
     }
 
     if (gotPacket > 0)
     {
         in->frameCount++;
-        printf("Succeed to encode (1) frame=%d, size=%d\n", in->frameCount, pAVPacket.size);
+        LOGP(DEBUG, "Succeed to encode (1) frame=%d, size=%d", in->frameCount, pAVPacket.size);
         memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
         in->buffSize = pAVPacket.size;
         av_free_packet(&pAVPacket);
@@ -265,12 +274,15 @@
 bool encodeH264(H264Encoder_Internal* in, uint8_t* buffer, timeval pts)
 {
+    uint16_t width = in->config.bytesBufferImageWidth;
+    uint16_t height = in->config.bytesBufferImageHeight;
+
     AVFrame avFrame;
-    avFrame.width = 1920;//#todo
-    avFrame.height = 1080;
+    avFrame.width = width;
+    avFrame.height = height;
     avFrame.data[0] = buffer;
-    avFrame.data[1] = buffer + 1920*1080;
-    avFrame.data[2] = buffer + 1920*1080 + 1920*1080/4;
+    avFrame.data[1] = buffer + width*height;
+    avFrame.data[2] = buffer + width*height + width*height/4;
 
     return encodeH264(in, &avFrame, pts);
 }
@@ -282,10 +294,34 @@
     if (!in->ffmpegInited)
     {
+        MB_Frame* frame = (MB_Frame*)pm.buffer;
+        if (frame != nullptr && frame->buffer != nullptr && 
+            (in->config.bytesBufferImageWidth == 0 || in->config.bytesBufferImageHeight == 0))
+        {
+            if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
+            {
+                AVFrame* pAVFrame = (AVFrame*)frame->buffer;
+                if (pAVFrame != nullptr)
+                {
+                    in->config.bytesBufferImageWidth = pAVFrame->width;
+                    in->config.bytesBufferImageHeight = pAVFrame->height;
+                    LOGP(NOTICE, "Set codec size from AVFrame width=%d, height=%d", 
+                        in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+                }
+            }
+            else if (frame->type == MB_Frame::MBFT_YUV420)
+            {
+                in->config.bytesBufferImageWidth = frame->width;
+                in->config.bytesBufferImageHeight = frame->height;
+                LOGP(NOTICE, "Set codec size from frame width=%d, height=%d", 
+                    in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+            }
+        }
+
         bool ret = initH264EncoderEnv(in);
         if (!ret)
         {
-            printf("initH264EncoderEnv error\n");
+            LOG_ERROR << "initH264EncoderEnv error";
            return false;
         }
         else
@@ -294,7 +330,7 @@
     if (pm.type != PipeMaterial::PMT_FRAME)
     {
-        printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+        LOG_ERROR << "Only support PMT_FRAME";
         return false;
     }
@@ -311,7 +347,7 @@
         ret = encodeH264(in, (uint8_t*)(frame->buffer), frame->pts);
     else
     {
-        printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME / MBFT_YUV420\n");
+        LOG_ERROR << "Only support MBFT_PTR_AVFRAME / MBFT_YUV420";
         in->payError = true;
         return false;
     }
@@ -326,6 +362,7 @@
         in->lastFrame.width = frame->width;
         in->lastFrame.height = frame->height;
         in->lastFrame.pts = frame->pts;
+        //#todo resetPts
     }
 
     return ret;
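The byte-buffer overload of encodeH264() assumes a tightly packed I420 frame; the offsets follow directly from 4:2:0 subsampling, where each chroma plane is a quarter of the luma plane. A self-contained restatement of that layout (sketch; the struct and function names are illustrative, not from the patch):

    #include <cstddef>
    #include <cstdint>

    struct I420Planes { uint8_t* y; uint8_t* u; uint8_t* v; size_t totalSize; };

    // Plane pointers into a packed I420 buffer of width x height pixels:
    // [ Y: w*h bytes ][ U: w*h/4 bytes ][ V: w*h/4 bytes ]
    I420Planes i420Planes(uint8_t* buffer, size_t width, size_t height)
    {
        I420Planes p;
        p.y = buffer;
        p.u = buffer + width*height;
        p.v = buffer + width*height + width*height/4;
        p.totalSize = width*height*3/2;
        return p;
    }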
RtspFace/PL_H264Encoder.h
@@ -3,6 +3,26 @@
 
 #include "PipeLine.h"
 
+struct PL_H264Encoder_Config
+{
+    size_t inBufferSize;
+    bool resetPTS;
+
+    uint16_t bytesBufferImageWidth; // only useful for PMT_BYTES / MBFT_YUV420 / MBFT_BGRA
+    uint16_t bytesBufferImageHeight;
+
+    size_t avc_bit_rate;
+    uint16_t avc_fps;
+    uint16_t avc_gop;
+    uint16_t avc_max_b_frames;
+    int avc_profile; // FF_PROFILE_H264_BASELINE / FF_PROFILE_H264_MAIN / FF_PROFILE_H264_HIGH
+
+    std::string av_opt_preset;
+    std::string av_opt_tune;
+
+    PL_H264Encoder_Config();
+};
+
 class PL_H264Encoder : public PipeLineElem
 {
 public:
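A usage sketch for the encoder config, again following the push_elem()/init() convention from main.cpp (the values are examples; width/height may stay 0 because pay() now derives them from the first frame):

    PL_H264Encoder* h264Encoder = (PL_H264Encoder*)pipeLine.push_elem("PL_H264Encoder");

    PL_H264Encoder_Config encoderConfig;
    encoderConfig.avc_bit_rate = 2*1024*1024;  // example target bit rate
    encoderConfig.avc_gop = 50;                // one keyframe every 2 s at 25 fps
    encoderConfig.av_opt_tune = "zerolatency"; // see the constructor comment
    // bytesBufferImageWidth/Height left 0: filled in from the first frame in pay()

    if (!h264Encoder->init(&encoderConfig))
    {
        LOG_ERROR << "h264Encoder.init error";
        exit(EXIT_FAILURE);
    }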
RtspFace/PL_RTSPClient.cpp
@@ -7,12 +7,12 @@
 void rtsp_client_fmtp_callback(void* arg, const char* val);
 void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize, timeval presentationTime);
 void rtsp_client_continue_callback(void* arg);
-//struct RTSPConfig;
+//struct PL_RTSPClient_Config;
 #include "live555/testProgs/testRTSPClient.hpp"
 
 struct RTSPClient_Internal
 {
-    RTSPConfig rtspConfig;
+    PL_RTSPClient_Config rtspConfig;
     pthread_t live_daemon_thid;
     char eventLoopWatchVariable;
     bool live_daemon_running;
@@ -50,7 +50,7 @@
     void reset()
     {
-        RTSPConfig _rtspConfig;
+        PL_RTSPClient_Config _rtspConfig;
         rtspConfig = _rtspConfig;
         live_daemon_thid = 0;
         eventLoopWatchVariable = 0;
@@ -117,7 +117,7 @@
     if (args == nullptr)
         return false;
 
-    const RTSPConfig* config = reinterpret_cast<const RTSPConfig*>(args);
+    const PL_RTSPClient_Config* config = reinterpret_cast<const PL_RTSPClient_Config*>(args);
     RTSPClient_Internal* in = (RTSPClient_Internal*)internal;
     in->reset();
     in->rtspConfig = *config;
RtspFace/PL_RTSPClient.h
@@ -4,7 +4,7 @@
 #include "PipeLine.h"
 #include <string>
 
-struct RTSPConfig
+struct PL_RTSPClient_Config
 {
     std::string progName;
     std::string rtspURL;
@@ -13,7 +13,7 @@
     int tunnelOverHTTPPortNum; // portNumBits
     void* args;
 
-    RTSPConfig() : 
+    PL_RTSPClient_Config() : 
         progName(), rtspURL() ,aux(true), verbosityLevel(1), tunnelOverHTTPPortNum(0), args(nullptr)
     { }
 };
RtspFace/PL_RTSPServer.cpp
@@ -113,7 +113,7 @@
         *FrameBuffer = pBuffer;
         *FrameSize = newBufferSize;
 
-        LOG(DEBUG) << "send frame size=" << in.buffSize;
+        LOG_DEBUG << "send frame size=" << in.buffSize;
     }
 
     virtual char ReleaseFrame()
@@ -125,7 +125,7 @@
         int ret = pthread_mutex_unlock(in.frame_mutex);
         if(ret != 0)
         {
-            LOG(WARN) << "pthread_mutex_unlock frame_mutex: " << strerror(ret);
+            LOG_WARN << "pthread_mutex_unlock frame_mutex: " << strerror(ret);
             return 0;
         }
     }
@@ -143,7 +143,7 @@
         int ret = pthread_mutex_lock(in.frame_mutex);
         if(ret != 0)
         {
-            LOG(WARN) << "pthread_mutex_lock frame_mutex: " << strerror(ret);
+            LOG_WARN << "pthread_mutex_lock frame_mutex: " << strerror(ret);
             return;
         }
     }
@@ -197,7 +197,7 @@
     int ret = pthread_create(&(in->live_daemon_thid), NULL, live_daemon_thd, in);
     if(ret != 0)
     {
-        LOG(ERROR) << "pthread_create: " << strerror(ret);
+        LOG_ERROR << "pthread_create: " << strerror(ret);
         return false;
     }
@@ -220,12 +220,12 @@
     if (pm.type != PipeMaterial::PMT_FRAME)
     {
-        LOG(ERROR) << "PL_RTSPServer::pay only support PMT_FRAME";
+        LOG_ERROR << "PL_RTSPServer::pay only support PMT_FRAME";
         return false;
     }
 
     if (in->buffSize > 0)
-        LOG(WARN) << "PL_RTSPServer::pay may lost data size=" << in->buffSize;
+        LOG_WARN << "PL_RTSPServer::pay may lost data size=" << in->buffSize;
 
     MB_Frame* frame = (MB_Frame*)pm.buffer;
     if (frame->buffer == nullptr)
RtspFace/PL_SensetimeFaceDetect.cpp
@@ -70,7 +70,7 @@
         in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
     else
     {
-        LOG(ERROR) << "alignment point size must be 21 or 106";
+        LOG_ERROR << "alignment point size must be 21 or 106";
         return false;
     }
@@ -79,7 +79,7 @@
                 in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
     if (cv_result != CV_OK)
     {
-        LOG(ERROR) << "cv_face_create_tracker failed, error code" << cv_result;
+        LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
         return false;
     }
@@ -87,11 +87,11 @@
     cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
     if (cv_result != CV_OK)
     {
-        LOG(ERROR) << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
+        LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
         return false;
     }
     else
-        LOG(ERROR) << "detect face count limit : " << val;
+        LOG_ERROR << "detect face count limit : " << val;
 
     return true;
 }
@@ -120,7 +120,7 @@
                     CV_FACE_UP, &p_face, &face_count);
     if (cv_result != CV_OK)
     {
-        LOG(ERROR) << "cv_face_track failed, error : " << cv_result;
+        LOG_ERROR << "cv_face_track failed, error : " << cv_result;
         cv_face_release_tracker_result(p_face, face_count);
         return -1;
     }
@@ -130,11 +130,11 @@
     cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
     for (int i = 0; i < face_count; i++)
     {
-        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
+        LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
             p_face[i].rect.left, p_face[i].rect.top,
             p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
-        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
+        LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
             p_face[i].yaw,
             p_face[i].pitch, p_face[i].roll,
             p_face[i].eye_dist);
@@ -180,7 +180,7 @@
     if (pm.type != PipeMaterial::PMT_FRAME)
     {
-        LOG(ERROR) << "PL_H264Encoder::pay only support PMT_FRAME";
+        LOG_ERROR << "PL_H264Encoder::pay only support PMT_FRAME";
         return false;
     }
@@ -190,7 +190,7 @@
     MB_Frame* frame = (MB_Frame*)pm.buffer;
     if (frame->type != MB_Frame::MBFT_YUV420)
     {
-        LOG(ERROR) << "PL_H264Encoder::pay only support MBFT_YUV420";
+        LOG_ERROR << "PL_H264Encoder::pay only support MBFT_YUV420";
         return false;
     }
RtspFace/PipeLine.cpp
@@ -90,16 +90,16 @@
     PipeDebugger(PipeLine* _pipeLine) : 
         pipeLine(_pipeLine), retElem(nullptr), pm(nullptr)
     {
-        LOG(DEBUG) << "pipe line begin";
+        LOG_DEBUG << "pipe line begin";
     }
 
     ~PipeDebugger()
     {
         bool retOK = (*(pipeLine->elems).rbegin() == retElem);
         if (retOK)
-            LOG(DEBUG) << "pipe line end, ret OK";
+            LOG_DEBUG << "pipe line end, ret OK";
         else
-            LOG(WARN) << "pipe line end, ret ERROR";
+            LOG_WARN << "pipe line end, ret ERROR";
     }
 };
RtspFace/live555/testProgs/testRTSPClient.hpp
@@ -33,10 +33,11 @@
 // Even though we're not going to be doing anything with the incoming data, we still need to receive it.
 // Define the size of the buffer that we'll use:
-#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 1920*1080*3
+#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 1920*1080*3//#todo
 
 // If you don't want to see debugging output for each received frame, then comment out the following line:
-#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
+//#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
+//#define DEBUG_PRINT_NPT 1
 
 // Forward function definitions:
@@ -52,7 +53,7 @@
 // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
 
 // The main streaming routine (for each "rtsp://" URL):
-void openURL(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
+void openURL(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
 
 // Used to iterate through each stream's 'subsessions', setting up each one:
 void setupNextSubsession(RTSPClient* rtspClient);
@@ -61,34 +62,39 @@
 void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
 
 // A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
-UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
-  return env << "[URL:\"" << rtspClient.url() << "\"]: ";
+log4cpp::CategoryStream& operator<<(log4cpp::CategoryStream& logRoot, const RTSPClient& rtspClient)
+{
+    return logRoot << "[URL:\"" << rtspClient.url() << "\"]: ";
 }
 
 // A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
-UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
-  return env << subsession.mediumName() << "/" << subsession.codecName();
+log4cpp::CategoryStream& operator<<(log4cpp::CategoryStream& logRoot, const MediaSubsession& subsession)
+{
+    return logRoot << subsession.mediumName() << "/" << subsession.codecName();
 }
 
-void usage(UsageEnvironment& env, char const* progName) {
-  env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n";
-  env << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)\n";
+void usage(UsageEnvironment& env, char const* progName)
+{
+    LOG_DEBUG << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>";
+    LOG_DEBUG << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)";
 }
 
 char eventLoopWatchVariable = 0;
 
-int test_main(int argc, char** argv) {
+int test_main(int argc, char** argv)
+{
     // Begin by setting up our usage environment:
     TaskScheduler* scheduler = BasicTaskScheduler::createNew();
     UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
 
     // We need at least one "rtsp://" URL argument:
-    if (argc < 2) {
+    if (argc < 2)
+    {
         usage(*env, argv[0]);
         return 1;
     }
 
-    RTSPConfig rtspConfig;
+    PL_RTSPClient_Config rtspConfig;
     rtspConfig.progName = argv[0];
     rtspConfig.rtspURL = "";
     rtspConfig.aux = false;
@@ -97,7 +103,8 @@
     rtspConfig.args = nullptr;
 
     // There are argc-1 URLs: argv[1] through argv[argc-1].  Open and start streaming each one:
-    for (int i = 1; i <= argc-1; ++i) {
+    for (int i = 1; i <= argc-1; ++i)
+    {
         rtspConfig.rtspURL = argv[i];
         openURL(*env, rtspConfig);
     }
@@ -119,7 +126,8 @@
 
 // Define a class to hold per-stream state that we maintain throughout each stream's lifetime:
-class StreamClientState {
+class StreamClientState
+{
 public:
     StreamClientState();
     virtual ~StreamClientState();
@@ -137,18 +145,19 @@
 // showing how to play multiple streams, concurrently, we can't do that.  Instead, we have to have a separate "StreamClientState"
 // structure for each "RTSPClient".  To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass:
 
-class ourRTSPClient: public RTSPClient {
+class ourRTSPClient: public RTSPClient
+{
 public:
-    static ourRTSPClient* createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
+    static ourRTSPClient* createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
 
 protected:
-    ourRTSPClient(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
+    ourRTSPClient(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
     // called only by createNew();
     virtual ~ourRTSPClient();
 
 public:
     StreamClientState scs;
-    const RTSPConfig& rtspConfig;
+    const PL_RTSPClient_Config& rtspConfig;
 };
 
 // Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
@@ -160,12 +169,12 @@
 {
 public:
     static DummySink* createNew(UsageEnvironment& env, 
-            const RTSPConfig& _rtspConfig,
+            const PL_RTSPClient_Config& _rtspConfig,
             MediaSubsession& subsession, // identifies the kind of data that's being received
             char const* streamId = NULL); // identifies the stream itself (optional)
 
private:
-    DummySink(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId);
+    DummySink(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId);
     // called only by "createNew()"
     virtual ~DummySink();
@@ -177,7 +186,7 @@
             struct timeval presentationTime, unsigned durationInMicroseconds);
 
 public:
-    const RTSPConfig& rtspConfig;
+    const PL_RTSPClient_Config& rtspConfig;
 
 private:
     // redefined virtual functions:
@@ -191,14 +200,14 @@
 
 static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
 
-void openURL(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
+void openURL(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
 {
     // Begin by creating a "RTSPClient" object.  Note that there is a separate "RTSPClient" object for each stream that we wish
     // to receive (even if more than stream uses the same "rtsp://" URL).
     RTSPClient* rtspClient = ourRTSPClient::createNew(env, _rtspConfig);
     if (rtspClient == NULL)
     {
-        env << "Failed to create a RTSP client for URL \"" << _rtspConfig.rtspURL.c_str() << "\": " << env.getResultMsg() << "\n";
+        LOG_ERROR << "Failed to create a RTSP client for URL \"" << _rtspConfig.rtspURL.c_str() << "\": " << env.getResultMsg();
         return;
     }
@@ -222,25 +231,25 @@
         if (resultCode != 0)
         {
-            env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
+            LOG_WARN << *rtspClient << "Failed to get a SDP description: " << resultString;
             delete[] resultString;
             break;
         }
 
         char* const sdpDescription = resultString;
-        env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";
+        LOG_INFO << *rtspClient << "Got a SDP description:\n" << sdpDescription;
 
         // Create a media session object from this SDP description:
         scs.session = MediaSession::createNew(env, sdpDescription);
         delete[] sdpDescription; // because we don't need it anymore
         if (scs.session == NULL)
         {
-            env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
+            LOG_ERROR << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg();
             break;
         }
         else if (!scs.session->hasSubsessions())
         {
-            env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
+            LOG_WARN << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)";
             break;
         }
@@ -250,7 +259,8 @@
         scs.iter = new MediaSubsessionIterator(*scs.session);
         setupNextSubsession(rtspClient);
         return;
-    } while (0);
+    }
+    while (0);
 
     // An unrecoverable error occurred with this stream.
     shutdownStream(rtspClient);
@@ -262,18 +272,21 @@
     StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
     scs.subsession = scs.iter->next();
-    if (scs.subsession != NULL) {
-        if (!scs.subsession->initiate()) {
-            env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
+    if (scs.subsession != NULL)
+    {
+        if (!scs.subsession->initiate())
+        {
+            LOG_ERROR << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg();
             setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
         }
-        else {
-            env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
-            if (scs.subsession->rtcpIsMuxed()) {
-                env << "client port " << scs.subsession->clientPortNum();
-            } else {
-                env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
-            }
-            env << ")\n";
+        else
+        {
+            LOG_INFO << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
+            if (scs.subsession->rtcpIsMuxed())
+                LOG_INFO << "client port " << scs.subsession->clientPortNum();
+            else
+                LOG_INFO << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+            LOG_INFO << ")";
 
             // Continue setting up this subsession, by sending a RTSP "SETUP" command:
             rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
@@ -282,32 +295,41 @@
     }
 
     // We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
-    if (scs.session->absStartTime() != NULL) {
+    if (scs.session->absStartTime() != NULL)
+    {
         // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
         rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
-    } else {
+    }
+    else
+    {
         scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
         rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
     }
 }
 
-void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
-    do {
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString)
+{
+    do
+    {
         UsageEnvironment& env = rtspClient->envir(); // alias
         StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-        if (resultCode != 0) {
-            env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
+        if (resultCode != 0)
+        {
+            LOG_ERROR << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString;
             break;
         }
 
-        env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
-        if (scs.subsession->rtcpIsMuxed()) {
-            env << "client port " << scs.subsession->clientPortNum();
-        } else {
-            env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+        LOG_INFO << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
+        if (scs.subsession->rtcpIsMuxed())
+        {
+            LOG_INFO << "client port " << scs.subsession->clientPortNum();
         }
-        env << ")\n";
+        else
+        {
+            LOG_INFO << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+        }
+        LOG_INFO << ")";
 
         // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
         // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
@@ -316,36 +338,42 @@
         scs.subsession->sink = DummySink::createNew(env, ((ourRTSPClient*)rtspClient)->rtspConfig, 
                             *scs.subsession, rtspClient->url());
         // perhaps use your own custom "MediaSink" subclass instead
-        if (scs.subsession->sink == NULL) {
-            env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
+        if (scs.subsession->sink == NULL)
+        {
+            LOG_ERROR << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg();
             break;
         }
 
-        env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
+        LOG_INFO << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession";
         scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
         scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), subsessionAfterPlaying, scs.subsession);
 
         // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
-        if (scs.subsession->rtcpInstance() != NULL) {
+        if (scs.subsession->rtcpInstance() != NULL)
+        {
            scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
         }
-    } while (0);
+    }
+    while (0);
     delete[] resultString;
 
     // Set up the next subsession, if any:
     setupNextSubsession(rtspClient);
 }
 
-void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString)
+{
     Boolean success = False;
 
-    do {
+    do
+    {
         UsageEnvironment& env = rtspClient->envir(); // alias
         StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-        if (resultCode != 0) {
-            env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
+        if (resultCode != 0)
+        {
+            LOG_ERROR << *rtspClient << "Failed to start playing session: " << resultString;
            break;
        }
@@ -353,24 +381,28 @@
         // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
         // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
         // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
-        if (scs.duration > 0) {
+        if (scs.duration > 0)
+        {
            unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
            scs.duration += delaySlop;
            unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
            scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
        }
 
-        env << *rtspClient << "Started playing session";
-        if (scs.duration > 0) {
-            env << " (for up to " << scs.duration << " seconds)";
+        LOG_INFO << *rtspClient << "Started playing session";
+        if (scs.duration > 0)
+        {
+            LOG_INFO << " (for up to " << scs.duration << " seconds)";
         }
-        env << "...\n";
+        LOG_INFO << "...";
 
         success = True;
-    } while (0);
+    }
+    while (0);
     delete[] resultString;
 
-    if (!success) {
+    if (!success)
+    {
         // An unrecoverable error occurred with this stream.
         shutdownStream(rtspClient);
     }
@@ -379,7 +411,8 @@
 
 // Implementation of the other event handlers:
 
-void subsessionAfterPlaying(void* clientData) {
+void subsessionAfterPlaying(void* clientData)
+{
     MediaSubsession* subsession = (MediaSubsession*)clientData;
     RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);
@@ -390,7 +423,8 @@
     // Next, check whether *all* subsessions' streams have now been closed:
     MediaSession& session = subsession->parentSession();
     MediaSubsessionIterator iter(session);
-    while ((subsession = iter.next()) != NULL) {
+    while ((subsession = iter.next()) != NULL)
+    {
         if (subsession->sink != NULL)
             return; // this subsession is still active
     }
@@ -398,18 +432,20 @@
     shutdownStream(rtspClient);
 }
 
-void subsessionByeHandler(void* clientData) {
+void subsessionByeHandler(void* clientData)
+{
     MediaSubsession* subsession = (MediaSubsession*)clientData;
     RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
     UsageEnvironment& env = rtspClient->envir(); // alias
 
-    env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n";
+    LOG_INFO << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession";
 
     // Now act as if the subsession had closed:
     subsessionAfterPlaying(subsession);
 }
 
-void streamTimerHandler(void* clientData) {
+void streamTimerHandler(void* clientData)
+{
     ourRTSPClient* rtspClient = (ourRTSPClient*)clientData;
     StreamClientState& scs = rtspClient->scs; // alias
@@ -419,22 +455,27 @@
     shutdownStream(rtspClient);
 }
 
-void shutdownStream(RTSPClient* rtspClient, int exitCode) {
+void shutdownStream(RTSPClient* rtspClient, int exitCode)
+{
     UsageEnvironment& env = rtspClient->envir(); // alias
     StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
     // First, check whether any subsessions have still to be closed:
-    if (scs.session != NULL) {
+    if (scs.session != NULL)
+    {
         Boolean someSubsessionsWereActive = False;
         MediaSubsessionIterator iter(*scs.session);
         MediaSubsession* subsession;
 
-        while ((subsession = iter.next()) != NULL) {
-            if (subsession->sink != NULL) {
+        while ((subsession = iter.next()) != NULL)
+        {
+            if (subsession->sink != NULL)
+            {
                 Medium::close(subsession->sink);
                 subsession->sink = NULL;
 
-                if (subsession->rtcpInstance() != NULL) {
+                if (subsession->rtcpInstance() != NULL)
+                {
                     subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
                 }
@@ -442,18 +483,20 @@
             }
         }
 
-        if (someSubsessionsWereActive) {
+        if (someSubsessionsWereActive)
+        {
             // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
             // Don't bother handling the response to the "TEARDOWN".
             rtspClient->sendTeardownCommand(*scs.session, NULL);
         }
     }
 
-    env << *rtspClient << "Closing the stream.\n";
+    LOG_NOTICE << *rtspClient << "Closing the stream.";
     Medium::close(rtspClient);
     // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
 
-    if (--rtspClientCount == 0) {
+    if (--rtspClientCount == 0)
+    {
         // The final stream has ended, so exit the application now.
         // (Of course, if you're embedding this code into your own application, you might want to comment this out,
         // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
@@ -464,30 +507,34 @@
 
 // Implementation of "ourRTSPClient":
 
-ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
+ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
 {
     return new ourRTSPClient(env, _rtspConfig);
 }
 
-ourRTSPClient::ourRTSPClient(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
+ourRTSPClient::ourRTSPClient(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
     : RTSPClient(env, _rtspConfig.rtspURL.c_str(), _rtspConfig.verbosityLevel, _rtspConfig.progName.c_str(), 
         _rtspConfig.tunnelOverHTTPPortNum, -1), 
     rtspConfig(_rtspConfig)
 {
 }
 
-ourRTSPClient::~ourRTSPClient() {
+ourRTSPClient::~ourRTSPClient()
+{
 }
 
 // Implementation of "StreamClientState":
 
 StreamClientState::StreamClientState()
-    : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
+    : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0)
+{
 }
 
-StreamClientState::~StreamClientState() {
+StreamClientState::~StreamClientState()
+{
     delete iter;
-    if (session != NULL) {
+    if (session != NULL)
+    {
         // We also need to delete "session", and unschedule "streamTimerTask" (if set)
         UsageEnvironment& env = session->envir(); // alias
@@ -498,12 +545,12 @@
 
 // Implementation of "DummySink":
 
-DummySink* DummySink::createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId)
+DummySink* DummySink::createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId)
 {
     return new DummySink(env, _rtspConfig, subsession, streamId);
 }
 
-DummySink::DummySink(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId)
+DummySink::DummySink(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId)
     : MediaSink(env), rtspConfig(_rtspConfig), fSubsession(subsession)
 {
     fStreamId = strDup(streamId);
@@ -512,7 +559,10 @@
     // ffmpeg need AUX header
     if (rtspConfig.aux)
     {
-        fReceiveBuffer[0]=0x00; fReceiveBuffer[1]=0x00; fReceiveBuffer[2]=0x00; fReceiveBuffer[3]=0x01;
+        fReceiveBuffer[0]=0x00;
+        fReceiveBuffer[1]=0x00;
+        fReceiveBuffer[2]=0x00;
+        fReceiveBuffer[3]=0x01;
     }
 
     //parse sdp
@@ -524,7 +574,8 @@
     //std::cout << strFmtp << std::endl;
 }
 
-DummySink::~DummySink() {
+DummySink::~DummySink()
+{
     delete[] fReceiveBuffer;
     delete[] fStreamId;
 }
@@ -546,22 +597,26 @@
 }
 
 void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
-                struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
+                struct timeval presentationTime, unsigned /*durationInMicroseconds*/)
+{
     // We've just received a frame of data.  (Optionally) print out information about it:
 #ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
-    if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
-    envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
-    if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
+    if (fStreamId != NULL) LOG_DEBUG << "Stream \"" << fStreamId << "\"; ";
+    LOG_DEBUG << "\t" << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
+    if (numTruncatedBytes > 0) LOG_DEBUG << " (with " << numTruncatedBytes << " bytes truncated)";
     char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
     sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
-    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
-    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
-        envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
+    LOG_DEBUG << "\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
+    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP())
+    {
+        LOG_DEBUG << "\tPTS not RTCP-synchronized"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
     }
#ifdef DEBUG_PRINT_NPT
-    envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
+    LOG_DEBUG << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
 #endif
-    envir() << "\n";
 #endif
 
     // Then continue, to request the next frame of data:
@@ -570,7 +625,8 @@
 
 Boolean DummySink::continuePlaying()
 {
-    if (fSource == NULL) return False; // sanity check (should not happen)
+    if (fSource == NULL)
+        return False; // sanity check (should not happen)
 
     rtsp_client_continue_callback(rtspConfig.args);
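Note that each LOG_* statement emits one log record, so output that live555 built up from several env << calls (e.g. the "Initiated the ... subsession (" line) now spans several records. The overload pattern used above extends to any type; a minimal sketch with a hypothetical type:

    #include <log4cpp/CategoryStream.hh>

    struct StreamStats { unsigned frames; unsigned bytes; }; // hypothetical type

    // Same pattern as the RTSPClient / MediaSubsession overloads above:
    log4cpp::CategoryStream& operator<<(log4cpp::CategoryStream& s, const StreamStats& st)
    {
        return s << "[frames:" << st.frames << " bytes:" << st.bytes << "]";
    }

    // StreamStats stats = {12, 34567};
    // LOG_INFO << stats << " received so far";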
RtspFace/logger.h
@@ -20,6 +20,12 @@
 #define LOG(__level) log4cpp::Category::getRoot() << log4cpp::Priority::__level << __FILE__ << ":" << __LINE__ << "\t"
 #define LOGP(__level, __format, arg...) log4cpp::Category::getRoot().log(log4cpp::Priority::__level, "%s:%d\t" __format, __FILE__, __LINE__, ##arg);
 
+#define LOG_DEBUG LOG(DEBUG)   // Debug messages, of no interest in any production environment
+#define LOG_INFO LOG(INFO)     // Not a significant event, but useful for diagnosing online problems
+#define LOG_NOTICE LOG(NOTICE) // Important event
+#define LOG_WARN LOG(WARN)     // Important event or input which may lead to errors
+#define LOG_ERROR LOG(ERROR)   // Error: the program is running in an abnormal (not expected) way
+
 inline void initLogger(int verbose)
 {
     // initialize log4cpp
@@ -42,7 +48,7 @@
     default: log.setPriority(log4cpp::Priority::NOTICE); break;
     }
 
-    LOG(INFO) << "level:" << log4cpp::Priority::getPriorityName(log.getPriority());
+    LOG_INFO << "level:" << log4cpp::Priority::getPriorityName(log.getPriority());
 }
 
 #endif
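A usage sketch for the two macro families; both prefix the message with FILE:LINE automatically:

    #include "logger.h"

    int main()
    {
        initLogger(1); // maps verbosity to a log4cpp priority, see initLogger()

        LOG_NOTICE << "pipeline started";                 // stream style
        int frameNum = 1, size = 4096;
        LOGP(DEBUG, "frame=%d, size=%d", frameNum, size); // printf style
        return 0;
    }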
RtspFace/main.cpp
@@ -28,7 +28,7 @@
     {
         PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");
 
-        RTSPConfig rtspConfig;
+        PL_RTSPClient_Config rtspConfig;
         rtspConfig.progName = argv[0];
         rtspConfig.rtspURL = argv[1];
         rtspConfig.aux = true; // ffmpeg need aux, but live555 not
@@ -38,7 +38,7 @@
         bool ret = rtspClient->init(&rtspConfig);
         if (!ret)
         {
-            LOG(ERROR) << "rtspClient.init error";
+            LOG_ERROR << "rtspClient.init error";
             exit(EXIT_FAILURE);
         }
     }
@@ -65,7 +65,7 @@
     //    bool ret = queue1->init(&config);
     //    if (!ret)
     //    {
-    //        LOG(ERROR) << "queue1.init error";
+    //        LOG_ERROR << "queue1.init error";
     //        exit(EXIT_FAILURE);
     //    }
     //}
@@ -76,20 +76,19 @@
     }
 
     {
-        RTSPServerConfig config;
         PL_RTSPServer* rtspServer = (PL_RTSPServer*)pipeLine.push_elem("PL_RTSPServer");
-        bool ret = rtspServer->init(&config);
+        bool ret = rtspServer->init(nullptr);
         if (!ret)
         {
-            LOG(ERROR) << "rtspServer.init error";
+            LOG_ERROR << "rtspServer.init error";
             exit(EXIT_FAILURE);
         }
     }
 
     while(true)
     {
-        //LOG(ERROR) << "begin pipe";
+        //LOG_ERROR << "begin pipe";
         pipeLine.pipe();
-        //LOG(ERROR) << "end pipe";
+        //LOG_ERROR << "end pipe";
     }
 }