From cc445067d1f61e12dbea4e6458f2c85ba58f01bf Mon Sep 17 00:00:00 2001
From: houxiao <houxiao@454eff88-639b-444f-9e54-f578c98de674>
Date: Fri, 30 Dec 2016 14:28:14 +0800
Subject: [PATCH] fix config, fix some logs and TODOs

---
 RtspFace/main.cpp                             |   15 
 RtspFace/PL_H264Decoder.h                     |    7 
 RtspFace/PL_RTSPClient.cpp                    |    8 
 RtspFace/PL_H264Encoder.h                     |   20 +
 RtspFace/PL_SensetimeFaceDetect.cpp           |   18 
 RtspFace/PipeLine.cpp                         |    6 
 RtspFace/PL_RTSPServer.cpp                    |   12 
 RtspFace/PL_H264Decoder.cpp                   |   54 ++
 RtspFace/PL_H264Encoder.cpp                   |  177 +++++---
 RtspFace/logger.h                             |    8 
 RtspFace/live555/testProgs/testRTSPClient.hpp |  702 +++++++++++++++++++++-----------------
 RtspFace/PL_RTSPClient.h                      |    4 
 RtspFace/PL_AVFrameBGRA.cpp                   |   10 
 RtspFace/PL_AVFrameYUV420.cpp                 |    6 
 14 files changed, 597 insertions(+), 450 deletions(-)

diff --git a/RtspFace/PL_AVFrameBGRA.cpp b/RtspFace/PL_AVFrameBGRA.cpp
index 4b0ba9e..7ec2d11 100644
--- a/RtspFace/PL_AVFrameBGRA.cpp
+++ b/RtspFace/PL_AVFrameBGRA.cpp
@@ -13,7 +13,7 @@
 
 struct PL_AVFrameBGRA_Internal
 {
-	uint8_t buffer[1920*1080*4];//#todo
+	uint8_t buffer[1920*1080*4];//#todo from config
 	size_t buffSize;
 	size_t buffSizeMax;
 	MB_Frame lastFrame;
@@ -69,15 +69,15 @@
 	
 }
 
-#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
-
 bool PL_AVFrameBGRA::pay(const PipeMaterial& pm)
 {
+#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+
 	PL_AVFrameBGRA_Internal* in = (PL_AVFrameBGRA_Internal*)internal;
 	
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		LOG(ERROR) << "PL_AVFrameBGRA::pay only support PMT_FRAME";
+		LOG_ERROR << "Only support PMT_FRAME";
 		return false;
 	}
 	
@@ -87,7 +87,7 @@
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
 	{
-		LOG(ERROR) << "PL_AVFrameBGRA::pay only support MBFT_PTR_AVFRAME";
+		LOG_ERROR << "Only support MBFT_PTR_AVFRAME";
 		return false;
 	}
 
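Note on the SUBSAMPLE macro moved into pay() above: it is ceiling division, used to size the half-resolution chroma planes of 4:2:0 data. A worked example with illustrative values (not part of the patch):

    // SUBSAMPLE(v, a) rounds v/a up to the next integer:
    #define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
    // For a 1920x1080 I420 frame:
    //   luma stride   = 1920
    //   chroma stride = SUBSAMPLE(1920, 2) = 960
    //   SUBSAMPLE(1919, 2) = 960 as well -- the round-up only matters for odd sizes
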
diff --git a/RtspFace/PL_AVFrameYUV420.cpp b/RtspFace/PL_AVFrameYUV420.cpp
index 93e0507..98bf843 100644
--- a/RtspFace/PL_AVFrameYUV420.cpp
+++ b/RtspFace/PL_AVFrameYUV420.cpp
@@ -11,7 +11,7 @@
 
 struct AVFrameYUV420_Internal
 {
-	uint8_t buffer[1920*1080*3];
+	uint8_t buffer[1920*1080*3];//#todo from config
 	size_t buffSize;
 	size_t buffSizeMax;
 	MB_Frame lastFrame;
@@ -69,7 +69,7 @@
 	
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		LOG(ERROR) << "PL_AVFrameYUV420::pay only support PMT_FRAME";
+		LOG_ERROR << "Only support PMT_FRAME";
 		return false;
 	}
 	
@@ -79,7 +79,7 @@
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->type != MB_Frame::MBFT_PTR_AVFRAME)
 	{
-		LOG(ERROR) << "PL_AVFrameYUV420::pay only support MBFT_PTR_AVFRAME";
+		LOG_ERROR << "Only support MBFT_PTR_AVFRAME";
 		return false;
 	}
 
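The 1920*1080*3 buffer flagged "#todo from config" above is double what I420 strictly needs: a 4:2:0 frame occupies width*height*3/2 bytes (a full-size Y plane plus two quarter-size chroma planes). A minimal sizing sketch, assuming a hypothetical config-driven helper:

    // Hypothetical helper, not part of this patch:
    constexpr size_t i420BufferSize(size_t w, size_t h)
    {
        return w * h                 // Y plane
             + (w / 2) * (h / 2)     // U plane
             + (w / 2) * (h / 2);    // V plane -- total w*h*3/2
    }
    // i420BufferSize(1920, 1080) == 3110400; the static buffer holds 6220800 bytes.
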
diff --git a/RtspFace/PL_H264Decoder.cpp b/RtspFace/PL_H264Decoder.cpp
index 3f61cce..55a130c 100644
--- a/RtspFace/PL_H264Decoder.cpp
+++ b/RtspFace/PL_H264Decoder.cpp
@@ -22,14 +22,16 @@
 
 	AVCodecContext* pAVCodecContext;
 	AVFrame* pAVFrame;//#todo delete
+	
 	MB_Frame lastFrame;
+	PL_H264Decoder_Config config;
 	
 	H264Decoder_Internal() : 
 		//buffSize(0), buffSizeMax(sizeof(buffer)), 
 		fmtp_set_to_context(false), 
 		payError(true), 
 		pAVCodecContext(nullptr), pAVFrame(nullptr), 
-		lastFrame()
+		lastFrame(), config()
 	{
 	}
 	
@@ -43,8 +45,14 @@
 		fmtp_set_to_context = false;
 		payError = true;
 		
+		pAVCodecContext = nullptr;
+		pAVFrame = nullptr;
+		
 		MB_Frame _lastFrame;
 		lastFrame = _lastFrame;
+		
+		PL_H264Decoder_Config _config;
+		config = _config;
 	}
 };
 
@@ -67,6 +75,12 @@
 {
 	H264Decoder_Internal* in = (H264Decoder_Internal*)internal;
 	in->reset();
+	
+	if (args)
+	{
+		PL_H264Decoder_Config* config = (PL_H264Decoder_Config*)args;
+		in->config = *config;
+	}
 	
 	return true;
 }
@@ -122,19 +136,20 @@
 
 	if (!avCodec)   
 	{  
-		LOG(WARN) << "codec not found!";  
+		LOG_WARN << "codec not found!";  
 		return false;  
 	}  
 
 	in->pAVCodecContext = avcodec_alloc_context3(avCodec);
-
-	in->pAVCodecContext->time_base.num = 1;
-	in->pAVCodecContext->frame_number = 1;
+	
 	in->pAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
-	in->pAVCodecContext->bit_rate = 0;
-	in->pAVCodecContext->time_base.den = 25;
-	in->pAVCodecContext->width = 1920;//#todo get from pm
-	in->pAVCodecContext->height = 1080;
+	// for reference only; the decoder fills these fields from the bitstream
+	//in->pAVCodecContext->frame_number = 1;
+	//in->pAVCodecContext->bit_rate = 0;
+	//in->pAVCodecContext->time_base.num = 1;
+	//in->pAVCodecContext->time_base.den = 25;
+	//in->pAVCodecContext->width = 1920;
+	//in->pAVCodecContext->height = 1080;
 
 	if (in->pAVCodecContext->extradata == NULL)  
     {  
@@ -170,7 +185,7 @@
 
 	if (av_packet_from_data(&packet, buffer, buffSize) != 0)
 	{  
-		LOG(WARN) << "av_packet_from_data error";
+		LOG_WARN << "av_packet_from_data error";
 		return false;
 	}
 	
@@ -185,7 +200,7 @@
 	}
 	else
 	{
-		LOG(WARN) << "incomplete frame";
+		LOG_WARN << "incomplete frame";
 		return false;
 	}
 }
@@ -208,7 +223,10 @@
 		size_t numSPropRecords = 0;
 		SPropRecord *p_record = parseSPropParameterSets(fmtp.c_str(), numSPropRecords);
 		if (numSPropRecords < 2)
-			return false;//#todo log
+		{
+			LOG_WARN << "numSPropRecords < 2";
+			return false;
+		}
 
 		SPropRecord &sps = p_record[0];  
 		SPropRecord &pps = p_record[1];
@@ -216,8 +234,8 @@
 		bool ret = initH264DecoderEnv(in, sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength);
 		if (!ret)
 		{
-			LOG(ERROR) << "PL_H264Decoder::pay initH264DecoderEnv error";
-			return false; // #todo log
+			LOG_ERROR << "initH264DecoderEnv error";
+			return false;
 		}
 		else
 			in->fmtp_set_to_context = true;
@@ -250,8 +268,12 @@
 			in->lastFrame.buffSize = sizeof(in->pAVFrame);
 			in->lastFrame.width = in->pAVFrame->width;
 			in->lastFrame.height = in->pAVFrame->height;
-			//in->lastFrame.pts = frame->pts;//#todo
-			gettimeofday(&(in->lastFrame.pts),NULL);
+			
+			if (in->config.resetPTS)
+				gettimeofday(&(in->lastFrame.pts),NULL);
+			else
+				in->lastFrame.pts = frame->pts;
+			
 		}
 	}
 	
diff --git a/RtspFace/PL_H264Decoder.h b/RtspFace/PL_H264Decoder.h
index c4770a9..3967b20 100644
--- a/RtspFace/PL_H264Decoder.h
+++ b/RtspFace/PL_H264Decoder.h
@@ -3,6 +3,13 @@
 
 #include "PipeLine.h"
 
+struct PL_H264Decoder_Config
+{
+	bool resetPTS;
+
+	PL_H264Decoder_Config() : resetPTS(true) { }
+};
+
 class PL_H264Decoder : public PipeLineElem
 {
 public:
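
With PL_H264Decoder_Config, callers choose whether decoded frames get a fresh wall-clock PTS (resetPTS, the default, via gettimeofday) or keep the PTS delivered with the input frame. A minimal usage sketch, assuming the init(void*) convention the other pipeline elements use; the pipeline-setup call is hypothetical:

    PL_H264Decoder_Config decoderConfig;
    decoderConfig.resetPTS = false; // pass through the source PTS instead of re-stamping

    PipeLineElem* decoder = pipeLine.push_elem("PL_H264Decoder"); // hypothetical setup call
    decoder->init(&decoderConfig);
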
diff --git a/RtspFace/PL_H264Encoder.cpp b/RtspFace/PL_H264Encoder.cpp
index 7068b79..1b3123a 100644
--- a/RtspFace/PL_H264Encoder.cpp
+++ b/RtspFace/PL_H264Encoder.cpp
@@ -1,5 +1,6 @@
 #include "PL_H264Encoder.h"
 #include "MaterialBuffer.h"
+#include "logger.h"
 
 extern "C"
 {
@@ -12,26 +13,35 @@
 	#include <libyuv.h>
 }
 
+PL_H264Encoder_Config::PL_H264Encoder_Config() : 
+	inBufferSize(2*1024*1024), // 2MByte
+	resetPTS(false), 
+	bytesBufferImageWidth(0), bytesBufferImageHeight(0), 
+	avc_bit_rate(1*1024*1024*8), // 1 MByte/s (8 Mbit/s)
+	avc_fps(25), avc_gop(25), avc_max_b_frames(0), avc_profile(FF_PROFILE_H264_MAIN), 
+	av_opt_preset("superfast"), av_opt_tune("")
+{
+	// av_opt_tune: zerolatency
+}
+
 struct H264Encoder_Internal
 {
-	uint8_t buffer[1920*1080*3];
+	uint8_t* buffer;
 	size_t buffSize;
-	size_t buffSizeMax;
 	bool payError;
 	bool ffmpegInited;
 	size_t frameCount;
 	MB_Frame lastFrame;
-
+	PL_H264Encoder_Config config;
+	
 	AVCodecContext* pAVCodecContext;
 	AVFrame* pAVFrame;//#todo delete
-	AVStream* pAVStream;
 	AVFormatContext* pAVFormatContext;
 	
 	H264Encoder_Internal() : 
-		buffSize(0), buffSizeMax(sizeof(buffer)), 
-		payError(true), ffmpegInited(false), frameCount(0), 
-		pAVCodecContext(nullptr), pAVFrame(nullptr), pAVStream(nullptr), pAVFormatContext(nullptr), 
-		lastFrame()
+		buffer(nullptr), buffSize(0), 
+		payError(true), ffmpegInited(false), frameCount(0), lastFrame(), config(), 
+		pAVCodecContext(nullptr), pAVFrame(nullptr), pAVFormatContext(nullptr)
 	{
 	}
 	
@@ -49,10 +59,16 @@
 		MB_Frame _lastFrame;
 		lastFrame = _lastFrame;
 		
+		PL_H264Encoder_Config _config;
+		config = _config;
+		
 		pAVCodecContext = nullptr;
 		pAVFrame = nullptr;
-		pAVStream = nullptr;
 		pAVFormatContext = nullptr;
+		
+		if (buffer != nullptr)
+			delete[] buffer;
+		buffer = new uint8_t[config.inBufferSize];
 	}
 };
 
@@ -76,6 +92,12 @@
 	H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
 	in->reset();
 	
+	if (args != nullptr)
+	{
+		PL_H264Encoder_Config* config = (PL_H264Encoder_Config*)args;
+		in->config = *config;
+		
+		// reset() sized the buffer with the default config; honor a caller-supplied inBufferSize
+		delete[] in->buffer;
+		in->buffer = new uint8_t[in->config.inBufferSize];
+	}
+	
 	return true;
 }
 
@@ -94,24 +116,26 @@
 
 	if (!avCodec)   
 	{  
-		printf("codec not found!\n");  
+		LOG_ERROR << "codec not found!";  
 		return false;  
 	}  
 
 	in->pAVCodecContext = avcodec_alloc_context3(avCodec);
 
-	in->pAVCodecContext->bit_rate = 1*1024*1024*8; // 3MB
-    in->pAVCodecContext->width = 800;//#todo test
-    in->pAVCodecContext->height = 600;//#todo from config
-    in->pAVCodecContext->time_base.num=1;
-    in->pAVCodecContext->time_base.den=25;
-    in->pAVCodecContext->gop_size = 25;
-    in->pAVCodecContext->max_b_frames = 0;
-	//in->pAVCodecContext->profile = FF_PROFILE_H264_MAIN;
+	in->pAVCodecContext->bit_rate = in->config.avc_bit_rate;
+    in->pAVCodecContext->width = in->config.bytesBufferImageWidth;
+    in->pAVCodecContext->height = in->config.bytesBufferImageHeight;
+    in->pAVCodecContext->time_base.num = 1;
+    in->pAVCodecContext->time_base.den = in->config.avc_fps;
+    in->pAVCodecContext->gop_size = in->config.avc_gop;
+    in->pAVCodecContext->max_b_frames = in->config.avc_max_b_frames;
+	in->pAVCodecContext->profile = in->config.avc_profile;
     in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
 	
-	av_opt_set(in->pAVCodecContext->priv_data, "preset", "superfast", 0);  
-	//av_opt_set(in->pAVCodecContext->priv_data, "tune", "zerolatency", 0);
+	if (!in->config.av_opt_preset.empty())
+		av_opt_set(in->pAVCodecContext->priv_data, "preset", in->config.av_opt_preset.c_str(), 0);
+	if (!in->config.av_opt_tune.empty())
+		av_opt_set(in->pAVCodecContext->priv_data, "tune", in->config.av_opt_tune.c_str(), 0);
 
 	if(avcodec_open2(in->pAVCodecContext, avCodec, NULL) >= 0)
 	{
@@ -121,59 +145,45 @@
 		in->pAVFrame->width  = in->pAVCodecContext->width;  
 		in->pAVFrame->height = in->pAVCodecContext->height;
 		
-		int ret = av_image_alloc(in->pAVFrame->data, in->pAVFrame->linesize, in->pAVCodecContext->width, in->pAVCodecContext->height, 
+		int ret = av_image_alloc(in->pAVFrame->data, in->pAVFrame->linesize, 
+							in->pAVCodecContext->width, in->pAVCodecContext->height, 
 							in->pAVCodecContext->pix_fmt, 16);  
 		if (ret < 0)
 		{  
-			printf("av_image_alloc error\n");
+			LOG_ERROR << "av_image_alloc error";
 			return false;
 		} 
 	}
 	else
 	{
-		printf("avcodec_open2 error\n");
+		LOG_ERROR << "avcodec_open2 error";
 		return false;
 	}
-	
-	//int ret = avformat_alloc_output_context2(&(in->pAVFormatContext), NULL, "avi", "");
-	//if (ret < 0 || in->pAVFormatContext == nullptr)
-	//{
-	//	printf("avformat_alloc_output_context2 error\n");
-	//	return false;
-	//}
-	//
-	//in->pAVStream = avformat_new_stream(in->pAVFormatContext, avCodec);
-	//if (in->pAVStream == nullptr)
-	//{
-	//	printf("avformat_new_stream error\n");
-	//	return false;
-	//}
-	//in->pAVStream->id = in->pAVFormatContext->nb_streams-1;
-	
+
 	return true;
 }
 
-#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
-
 void copyAVFrame(AVFrame* dest, AVFrame* src)
 {
-int src_width = src->width;
-int src_height = src->height;
-int dst_width = dest->width;
-int dst_height = dest->height;
-printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
-
-libyuv::I420Scale(src->data[0], src_width, 	
-				  src->data[1], SUBSAMPLE(src_width, 2), 
-				  src->data[2], SUBSAMPLE(src_width, 2), 
-				  src_width, src_height, 
-				  dest->data[0], dst_width, 	
-				  dest->data[1], SUBSAMPLE(dst_width, 2), 
-				  dest->data[2], SUBSAMPLE(dst_width, 2), 
-				  dst_width, dst_height, 
-				  libyuv::kFilterNone );
+//#test
+//#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+	//int src_width = src->width;
+	//int src_height = src->height;
+	//int dst_width = dest->width;
+	//int dst_height = dest->height;
+	//printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
+    //
+	//libyuv::I420Scale(src->data[0], src_width, 	
+	//				  src->data[1], SUBSAMPLE(src_width, 2), 
+	//				  src->data[2], SUBSAMPLE(src_width, 2), 
+	//				  src_width, src_height, 
+	//				  dest->data[0], dst_width, 	
+	//				  dest->data[1], SUBSAMPLE(dst_width, 2), 
+	//				  dest->data[2], SUBSAMPLE(dst_width, 2), 
+	//				  dst_width, dst_height, 
+	//				  libyuv::kFilterNone );
 	
-	//#test
+	
 	//static size_t f=0;
 	//char fname[50];
 	//sprintf(fname, "%u.yuv420", ++f);
@@ -183,13 +193,12 @@
 	//fwrite (dest->data[2] , sizeof(char), dst_width * dst_height / 4, pFile);
 	//fclose(pFile);
 	
-	//dest->data[0] = src->data[0];
-	//dest->data[1] = src->data[1];
-	//dest->data[2] = src->data[2];
+	dest->data[0] = src->data[0];
+	dest->data[1] = src->data[1];
+	dest->data[2] = src->data[2];
 	
 	//int height = dest->height;
 	//int width = dest->width;
-	//
 	//memcpy(dest->data[0], src->data[0], height * width); // Y
 	//memcpy(dest->data[1], src->data[1], height * width / 4); // U
 	//memcpy(dest->data[2], src->data[2], height * width / 4); // V
@@ -218,14 +227,14 @@
 	int ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, in->pAVFrame, &gotPacket);  
 	if (ret < 0)
 	{
-		printf("avcodec_encode_video2 (1) error=%d\n", ret);
+		LOG_WARN << "avcodec_encode_video2 (1) error=" << ret;
 		return false;
 	}
 	
 	if (gotPacket > 0)
 	{
 		in->frameCount++;
-		printf("Succeed to encode (1) frame=%d, size=%d\n", in->frameCount, pAVPacket.size);
+		LOGP(DEBUG, "Succeed to encode (1) frame=%d, size=%d", in->frameCount, pAVPacket.size);
 		memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
 		in->buffSize = pAVPacket.size;
 		av_free_packet(&pAVPacket);
@@ -265,12 +274,15 @@
 
 bool encodeH264(H264Encoder_Internal* in, uint8_t* buffer, timeval pts)
 {
+	uint16_t width = in->config.bytesBufferImageWidth;
+	uint16_t height = in->config.bytesBufferImageHeight;
+
 	AVFrame avFrame;
-	avFrame.width = 1920;//#todo
-	avFrame.height = 1080;
+	avFrame.width = width;
+	avFrame.height = height;
 	avFrame.data[0] = buffer;
-	avFrame.data[1] = buffer + 1920*1080;
-	avFrame.data[2] = buffer + 1920*1080 + 1920*1080/4;
+	avFrame.data[1] = buffer + width*height;
+	avFrame.data[2] = buffer + width*height + width*height/4;
 	return encodeH264(in, &avFrame, pts);
 }
 
@@ -282,10 +294,34 @@
 	
 	if (!in->ffmpegInited)
 	{
+		MB_Frame* frame = (MB_Frame*)pm.buffer;
+		if (frame != nullptr && frame->buffer != nullptr && 
+			(in->config.bytesBufferImageWidth == 0 || in->config.bytesBufferImageHeight == 0))
+		{
+			if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
+			{
+				AVFrame* pAVFrame = (AVFrame*)frame->buffer;
+				if (pAVFrame != nullptr)
+				{
+					in->config.bytesBufferImageWidth = pAVFrame->width;
+					in->config.bytesBufferImageHeight = pAVFrame->height;
+					LOGP(NOTICE, "Set codec size from AVFrame width=%d, height=%d", 
+						in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+				}
+			}
+			else if (frame->type == MB_Frame::MBFT_YUV420)
+			{
+				in->config.bytesBufferImageWidth = frame->width;
+				in->config.bytesBufferImageHeight = frame->height;
+				LOGP(NOTICE, "Set codec size from frame width=%d, height=%d", 
+					in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+			}
+		}
+		
 		bool ret = initH264EncoderEnv(in);
 		if (!ret)
 		{
-			printf("initH264EncoderEnv error\n");
+			LOG_ERROR << "initH264EncoderEnv error";
 			return false;
 		}
 		else
@@ -294,7 +330,7 @@
 	
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		printf("PL_H264Encoder::pay only support PMT_FRAME\n");
+		LOG_ERROR << "Only support PMT_FRAME";
 		return false;
 	}
 	
@@ -311,7 +347,7 @@
 		ret = encodeH264(in, (uint8_t*)(frame->buffer), frame->pts);
 	else
 	{
-		printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME / MBFT_YUV420\n");
+		LOG_ERROR << "Only support MBFT_PTR_AVFRAME / MBFT_YUV420";
 		in->payError = true;
 		return false;
 	}
@@ -326,6 +362,7 @@
 		in->lastFrame.width = frame->width;
 		in->lastFrame.height = frame->height;
 		in->lastFrame.pts = frame->pts;
+		//#todo resetPts
 	}
 	
 	return ret;
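
The raw-buffer overload of encodeH264() above assumes a packed I420 layout, which fixes the plane offsets. The arithmetic, spelled out for a 1920x1080 frame (illustrative numbers only):

    // Packed I420 buffer of width*height*3/2 bytes:
    //   Y: buffer[0       .. 2073600)   // width*height
    //   U: buffer[2073600 .. 2592000)   // + width*height/4
    //   V: buffer[2592000 .. 3110400)   // + width*height/4
    // which is exactly how avFrame.data[0..2] are derived above.
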
diff --git a/RtspFace/PL_H264Encoder.h b/RtspFace/PL_H264Encoder.h
index 9c85662..e01f0af 100644
--- a/RtspFace/PL_H264Encoder.h
+++ b/RtspFace/PL_H264Encoder.h
@@ -3,6 +3,26 @@
 
 #include "PipeLine.h"
 
+struct PL_H264Encoder_Config
+{
+	size_t inBufferSize;
+	bool resetPTS;
+	
+	uint16_t bytesBufferImageWidth; // only useful for PMT_BYTES / MBFT_YUV420 / MBFT_BGRA
+	uint16_t bytesBufferImageHeight;
+	
+	size_t avc_bit_rate;
+	uint16_t avc_fps;
+	uint16_t avc_gop;
+	uint16_t avc_max_b_frames;
+	int avc_profile; // FF_PROFILE_H264_BASELINE / FF_PROFILE_H264_MAIN / FF_PROFILE_H264_HIGH
+
+	std::string av_opt_preset;
+	std::string av_opt_tune;
+	
+	PL_H264Encoder_Config();
+};
+
 class PL_H264Encoder : public PipeLineElem
 {
 public:
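
Every parameter the encoder used to hardcode now lives in PL_H264Encoder_Config, and bytesBufferImageWidth/Height may stay 0 to be auto-filled from the first frame (see the pay() change above). A hedged sketch of overriding the defaults; `encoder` stands in for an element obtained from the pipeline:

    PL_H264Encoder_Config encoderConfig;          // defaults: ~8 Mbit/s, 25 fps, GOP 25, main profile, "superfast"
    encoderConfig.avc_bit_rate = 4 * 1024 * 1024; // illustrative override
    encoderConfig.av_opt_tune = "zerolatency";    // x264 tune for live streaming
    // width/height left at 0: taken from the first MBFT_YUV420 frame or AVFrame

    encoder->init(&encoderConfig);
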
diff --git a/RtspFace/PL_RTSPClient.cpp b/RtspFace/PL_RTSPClient.cpp
index ae885d7..0a1b163 100644
--- a/RtspFace/PL_RTSPClient.cpp
+++ b/RtspFace/PL_RTSPClient.cpp
@@ -7,12 +7,12 @@
 void rtsp_client_fmtp_callback(void* arg, const char* val);
 void rtsp_client_frame_callback(void* arg, uint8_t* buffer, size_t buffSize, timeval presentationTime);
 void rtsp_client_continue_callback(void* arg);
-//struct RTSPConfig;
+//struct PL_RTSPClient_Config;
 #include "live555/testProgs/testRTSPClient.hpp"
 
 struct RTSPClient_Internal
 {
-	RTSPConfig rtspConfig;
+	PL_RTSPClient_Config rtspConfig;
 	pthread_t live_daemon_thid;
 	char eventLoopWatchVariable;
 	bool live_daemon_running;
@@ -50,7 +50,7 @@
 	
 	void reset()
 	{
-		RTSPConfig _rtspConfig;
+		PL_RTSPClient_Config _rtspConfig;
 		rtspConfig = _rtspConfig;
 		live_daemon_thid = 0;
 		eventLoopWatchVariable = 0;
@@ -117,7 +117,7 @@
 	if (args == nullptr)
 		return false;
 
-	const RTSPConfig* config = reinterpret_cast<const RTSPConfig*>(args);
+	const PL_RTSPClient_Config* config = reinterpret_cast<const PL_RTSPClient_Config*>(args);
 	RTSPClient_Internal* in = (RTSPClient_Internal*)internal;
 	in->reset();
 	in->rtspConfig = *config;
diff --git a/RtspFace/PL_RTSPClient.h b/RtspFace/PL_RTSPClient.h
index 91fa685..6e43253 100644
--- a/RtspFace/PL_RTSPClient.h
+++ b/RtspFace/PL_RTSPClient.h
@@ -4,7 +4,7 @@
 #include "PipeLine.h"
 #include <string>
 
-struct RTSPConfig
+struct PL_RTSPClient_Config
 {
 	std::string progName;
 	std::string rtspURL;
@@ -13,7 +13,7 @@
 	int tunnelOverHTTPPortNum; // portNumBits
 	void* args;
 	
-	RTSPConfig() : 
+	PL_RTSPClient_Config() : 
 		progName(), rtspURL() ,aux(true), verbosityLevel(1), tunnelOverHTTPPortNum(0), args(nullptr)
 	{ }
 };
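
The rename from RTSPConfig to PL_RTSPClient_Config brings the struct in line with the per-element *_Config naming introduced elsewhere in this patch; the fields are unchanged. A minimal sketch with illustrative values:

    PL_RTSPClient_Config rtspConfig;
    rtspConfig.progName = "RtspFace";               // illustrative
    rtspConfig.rtspURL  = "rtsp://127.0.0.1/main";  // illustrative
    rtspConfig.aux      = true;                     // prepend 00 00 00 01 so ffmpeg can find NALU boundaries
    client->init(&rtspConfig);                      // `client` obtained from the pipeline
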
diff --git a/RtspFace/PL_RTSPServer.cpp b/RtspFace/PL_RTSPServer.cpp
index 79b78fd..6d31f34 100644
--- a/RtspFace/PL_RTSPServer.cpp
+++ b/RtspFace/PL_RTSPServer.cpp
@@ -113,7 +113,7 @@
 		*FrameBuffer = pBuffer;
 		*FrameSize = newBufferSize;
 
-		LOG(DEBUG) << "send frame size=" << in.buffSize;
+		LOG_DEBUG << "send frame size=" << in.buffSize;
 	}
 	
 	virtual char ReleaseFrame()
@@ -125,7 +125,7 @@
 			int ret = pthread_mutex_unlock(in.frame_mutex);
 			if(ret != 0)
 			{
-				LOG(WARN) << "pthread_mutex_unlock frame_mutex: " << strerror(ret);
+				LOG_WARN << "pthread_mutex_unlock frame_mutex: " << strerror(ret);
 				return 0;
 			}
 		}
@@ -143,7 +143,7 @@
 			int ret = pthread_mutex_lock(in.frame_mutex);
 			if(ret != 0)
 			{
-				LOG(WARN) << "pthread_mutex_lock frame_mutex: " << strerror(ret);
+				LOG_WARN << "pthread_mutex_lock frame_mutex: " << strerror(ret);
 				return;
 			}
 		}
@@ -197,7 +197,7 @@
 	int ret = pthread_create(&(in->live_daemon_thid), NULL, live_daemon_thd, in);
 	if(ret != 0)
 	{
-		LOG(ERROR) << "pthread_create: " << strerror(ret);
+		LOG_ERROR << "pthread_create: " << strerror(ret);
 		return false;
 	}
 
@@ -220,12 +220,12 @@
 	
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		LOG(ERROR) << "PL_RTSPServer::pay only support PMT_FRAME";
+		LOG_ERROR << "PL_RTSPServer::pay only support PMT_FRAME";
 		return false;
 	}
 	
 	if (in->buffSize > 0)
-		LOG(WARN) << "PL_RTSPServer::pay may lost data size=" << in->buffSize;
+		LOG_WARN << "PL_RTSPServer::pay may lost data size=" << in->buffSize;
 	
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->buffer == nullptr)
diff --git a/RtspFace/PL_SensetimeFaceDetect.cpp b/RtspFace/PL_SensetimeFaceDetect.cpp
index 85245fb..435bea3 100644
--- a/RtspFace/PL_SensetimeFaceDetect.cpp
+++ b/RtspFace/PL_SensetimeFaceDetect.cpp
@@ -70,7 +70,7 @@
 		in->config.point_size_config = CV_DETECT_ENABLE_ALIGN_106;
 	else
 	{
-		LOG(ERROR) << "alignment point size must be 21 or 106";
+		LOG_ERROR << "alignment point size must be 21 or 106";
 		return false;
 	}
 
@@ -79,7 +79,7 @@
 								in->config.point_size_config | CV_FACE_TRACKING_TWO_THREAD);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_create_tracker failed, error code" << cv_result;
+		LOG_ERROR << "cv_face_create_tracker failed, error code" << cv_result;
 		return false;
 	}
 
@@ -87,11 +87,11 @@
 	cv_result = cv_face_track_set_detect_face_cnt_limit(in->handle_track, in->config.detect_face_cnt_limit, &val);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
+		LOG_ERROR << "cv_face_track_set_detect_face_cnt_limit failed, error : " << cv_result;
 		return false;
 	}
 	else
-		LOG(ERROR) << "detect face count limit : " << val;
+		LOG_ERROR << "detect face count limit : " << val;
 	
 	return true;
 }
@@ -120,7 +120,7 @@
 							CV_FACE_UP, &p_face, &face_count);
 	if (cv_result != CV_OK)
 	{
-		LOG(ERROR) << "cv_face_track failed, error : " << cv_result;
+		LOG_ERROR << "cv_face_track failed, error : " << cv_result;
 		cv_face_release_tracker_result(p_face, face_count);
 		return -1;
 	}
@@ -130,11 +130,11 @@
 	cv::Mat yMat(cv::Size(1920,1080), CV_8UC1, buffer);
 	for (int i = 0; i < face_count; i++)
 	{
-		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d\n", i,
+		LOGP(DEBUG, "face: %d-----[%d, %d, %d, %d]-----id: %d", i,
 			p_face[i].rect.left, p_face[i].rect.top,
 			p_face[i].rect.right, p_face[i].rect.bottom, p_face[i].ID);
 			
-		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]\n",
+		LOGP(DEBUG, "face pose: [yaw: %.2f, pitch: %.2f, roll: %.2f, eye distance: %.2f]",
 			p_face[i].yaw,
 			p_face[i].pitch, p_face[i].roll, p_face[i].eye_dist);
 
@@ -180,7 +180,7 @@
 
 	if (pm.type != PipeMaterial::PMT_FRAME)
 	{
-		LOG(ERROR) << "PL_H264Encoder::pay only support PMT_FRAME";
+		LOG_ERROR << "PL_H264Encoder::pay only support PMT_FRAME";
 		return false;
 	}
 	
@@ -190,7 +190,7 @@
 	MB_Frame* frame = (MB_Frame*)pm.buffer;
 	if (frame->type != MB_Frame::MBFT_YUV420)
 	{
-		LOG(ERROR) << "PL_H264Encoder::pay only support MBFT_YUV420";
+		LOG_ERROR << "PL_H264Encoder::pay only support MBFT_YUV420";
 		return false;
 	}
 
diff --git a/RtspFace/PipeLine.cpp b/RtspFace/PipeLine.cpp
index 2491199..3547d94 100644
--- a/RtspFace/PipeLine.cpp
+++ b/RtspFace/PipeLine.cpp
@@ -90,16 +90,16 @@
 	PipeDebugger(PipeLine* _pipeLine) : 
 		pipeLine(_pipeLine), retElem(nullptr), pm(nullptr)
 	{
-		LOG(DEBUG) << "pipe line begin";
+		LOG_DEBUG << "pipe line begin";
 	}
 	
 	~PipeDebugger()
 	{
 		bool retOK = (*(pipeLine->elems).rbegin() == retElem);
 		if (retOK)
-			LOG(DEBUG) << "pipe line end, ret OK";
+			LOG_DEBUG << "pipe line end, ret OK";
 		else
-			LOG(WARN) << "pipe line end, ret ERROR";
+			LOG_WARN << "pipe line end, ret ERROR";
 	}
 };
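
PipeDebugger is an RAII helper: construction logs the pipeline start, and the destructor runs on every exit path, logging OK when the last element reached (retElem) is the pipeline's tail and ERROR otherwise. A stripped-down sketch of the idiom, not the repo's code verbatim:

    struct ScopeStatusLogger
    {
        bool ok = false;
        ScopeStatusLogger() { LOG_DEBUG << "scope begin"; }
        ~ScopeStatusLogger() // destructor fires on every return path
        {
            if (ok) LOG_DEBUG << "scope end, ret OK";
            else    LOG_WARN << "scope end, ret ERROR";
        }
    };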
 
diff --git a/RtspFace/live555/testProgs/testRTSPClient.hpp b/RtspFace/live555/testProgs/testRTSPClient.hpp
index 8f33f1a..ff4a861 100644
--- a/RtspFace/live555/testProgs/testRTSPClient.hpp
+++ b/RtspFace/live555/testProgs/testRTSPClient.hpp
@@ -33,10 +33,11 @@
 
 // Even though we're not going to be doing anything with the incoming data, we still need to receive it.
 // Define the size of the buffer that we'll use:
-#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 1920*1080*3
+#define DUMMY_SINK_RECEIVE_BUFFER_SIZE (1920*1080*3) //#todo from config
 
 // If you don't want to see debugging output for each received frame, then comment out the following line:
-#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
+//#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
+//#define DEBUG_PRINT_NPT 1
 
 // Forward function definitions:
 
@@ -49,10 +50,10 @@
 void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends
 void subsessionByeHandler(void* clientData); // called when a RTCP "BYE" is received for a subsession
 void streamTimerHandler(void* clientData);
-  // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
+// called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
 
 // The main streaming routine (for each "rtsp://" URL):
-void openURL(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
+void openURL(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
 
 // Used to iterate through each stream's 'subsessions', setting up each one:
 void setupNextSubsession(RTSPClient* rtspClient);
@@ -61,75 +62,82 @@
 void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
 
 // A function that outputs a string that identifies each stream (for debugging output).  Modify this if you wish:
-UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
-  return env << "[URL:\"" << rtspClient.url() << "\"]: ";
+log4cpp::CategoryStream& operator<<(log4cpp::CategoryStream& logRoot, const RTSPClient& rtspClient)
+{
+	return logRoot << "[URL:\"" << rtspClient.url() << "\"]: ";
 }
 
 // A function that outputs a string that identifies each subsession (for debugging output).  Modify this if you wish:
-UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
-  return env << subsession.mediumName() << "/" << subsession.codecName();
+log4cpp::CategoryStream& operator<<(log4cpp::CategoryStream& logRoot, const MediaSubsession& subsession)
+{
+	return logRoot << subsession.mediumName() << "/" << subsession.codecName();
 }
 
-void usage(UsageEnvironment& env, char const* progName) {
-  env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n";
-  env << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)\n";
+void usage(UsageEnvironment& env, char const* progName)
+{
+	LOG_DEBUG << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>";
+	LOG_DEBUG << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)";
 }
 
 char eventLoopWatchVariable = 0;
 
-int test_main(int argc, char** argv) {
-  // Begin by setting up our usage environment:
-  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
-  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
+int test_main(int argc, char** argv)
+{
+	// Begin by setting up our usage environment:
+	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+	UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
 
-  // We need at least one "rtsp://" URL argument:
-  if (argc < 2) {
-    usage(*env, argv[0]);
-    return 1;
-  }
+	// We need at least one "rtsp://" URL argument:
+	if (argc < 2)
+		{
+			usage(*env, argv[0]);
+			return 1;
+		}
 
-	RTSPConfig rtspConfig;
+	PL_RTSPClient_Config rtspConfig;
 	rtspConfig.progName = argv[0];
 	rtspConfig.rtspURL = "";
 	rtspConfig.aux = false;
 	rtspConfig.verbosityLevel = RTSP_CLIENT_VERBOSITY_LEVEL;
 	rtspConfig.tunnelOverHTTPPortNum = 0;
 	rtspConfig.args = nullptr;
-  
-  // There are argc-1 URLs: argv[1] through argv[argc-1].  Open and start streaming each one:
-  for (int i = 1; i <= argc-1; ++i) {
-	rtspConfig.rtspURL = argv[i];
-	openURL(*env, rtspConfig);
-  }
 
-  // All subsequent activity takes place within the event loop:
-  env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
-    // This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero.
+	// There are argc-1 URLs: argv[1] through argv[argc-1].  Open and start streaming each one:
+	for (int i = 1; i <= argc-1; ++i)
+		{
+			rtspConfig.rtspURL = argv[i];
+			openURL(*env, rtspConfig);
+		}
 
-  return 0;
+	// All subsequent activity takes place within the event loop:
+	env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
+	// This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero.
 
-  // If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above),
-  // and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects,
-  // then you can also reclaim the (small) memory used by these objects by uncommenting the following code:
-  /*
-    env->reclaim(); env = NULL;
-    delete scheduler; scheduler = NULL;
-  */
+	return 0;
+
+	// If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above),
+	// and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects,
+	// then you can also reclaim the (small) memory used by these objects by uncommenting the following code:
+	/*
+	  env->reclaim(); env = NULL;
+	  delete scheduler; scheduler = NULL;
+	*/
 }
 
 // Define a class to hold per-stream state that we maintain throughout each stream's lifetime:
 
-class StreamClientState {
+class StreamClientState
+{
 public:
-  StreamClientState();
-  virtual ~StreamClientState();
+	StreamClientState();
+	virtual ~StreamClientState();
 
 public:
-  MediaSubsessionIterator* iter;
-  MediaSession* session;
-  MediaSubsession* subsession;
-  TaskToken streamTimerTask;
-  double duration;
+	MediaSubsessionIterator* iter;
+	MediaSession* session;
+	MediaSubsession* subsession;
+	TaskToken streamTimerTask;
+	double duration;
 };
 
 // If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single
@@ -137,18 +145,19 @@
 // showing how to play multiple streams, concurrently, we can't do that.  Instead, we have to have a separate "StreamClientState"
 // structure for each "RTSPClient".  To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass:
 
-class ourRTSPClient: public RTSPClient {
+class ourRTSPClient: public RTSPClient
+{
 public:
-  static ourRTSPClient* createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
+	static ourRTSPClient* createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
 
 protected:
-  ourRTSPClient(UsageEnvironment& env, const RTSPConfig& _rtspConfig);
-    // called only by createNew();
-  virtual ~ourRTSPClient();
+	ourRTSPClient(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig);
+	// called only by createNew();
+	virtual ~ourRTSPClient();
 
 public:
-  StreamClientState scs;
-  const RTSPConfig& rtspConfig;
+	StreamClientState scs;
+	const PL_RTSPClient_Config& rtspConfig;
 };
 
 // Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
@@ -159,25 +168,25 @@
 class DummySink: public MediaSink
 {
 public:
-	static DummySink* createNew(UsageEnvironment& env, 
-				  const RTSPConfig& _rtspConfig,
-				  MediaSubsession& subsession, // identifies the kind of data that's being received
-				  char const* streamId = NULL); // identifies the stream itself (optional)
+	static DummySink* createNew(UsageEnvironment& env,
+	                            const PL_RTSPClient_Config& _rtspConfig,
+	                            MediaSubsession& subsession, // identifies the kind of data that's being received
+	                            char const* streamId = NULL); // identifies the stream itself (optional)
 
 private:
-	DummySink(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId);
+	DummySink(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId);
 	// called only by "createNew()"
 	virtual ~DummySink();
 
 	static void afterGettingFrame(void* clientData, unsigned frameSize,
-								unsigned numTruncatedBytes,
-				struct timeval presentationTime,
-								unsigned durationInMicroseconds);
+	                              unsigned numTruncatedBytes,
+	                              struct timeval presentationTime,
+	                              unsigned durationInMicroseconds);
 	void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
-			 struct timeval presentationTime, unsigned durationInMicroseconds);
+	                       struct timeval presentationTime, unsigned durationInMicroseconds);
 
 public:
-	const RTSPConfig& rtspConfig;
+	const PL_RTSPClient_Config& rtspConfig;
 
 private:
 	// redefined virtual functions:
@@ -191,23 +200,23 @@
 
 static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
 
-void openURL(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
+void openURL(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
 {
 	// Begin by creating a "RTSPClient" object.  Note that there is a separate "RTSPClient" object for each stream that we wish
 	// to receive (even if more than one stream uses the same "rtsp://" URL).
 	RTSPClient* rtspClient = ourRTSPClient::createNew(env, _rtspConfig);
 	if (rtspClient == NULL)
-	{
-		env << "Failed to create a RTSP client for URL \"" << _rtspConfig.rtspURL.c_str() << "\": " << env.getResultMsg() << "\n";
-		return;
-	}
+		{
+			LOG_ERROR << "Failed to create a RTSP client for URL \"" << _rtspConfig.rtspURL.c_str() << "\": " << env.getResultMsg();
+			return;
+		}
 
 	++rtspClientCount;
 
 	// Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
 	// Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
 	// Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop:
-	rtspClient->sendDescribeCommand(continueAfterDESCRIBE); 
+	rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
 }
 
 
@@ -216,41 +225,42 @@
 void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString)
 {
 	do
-	{
-		UsageEnvironment& env = rtspClient->envir(); // alias
-		StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
-
-		if (resultCode != 0)
 		{
-			env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
-			delete[] resultString;
-			break;
-		}
+			UsageEnvironment& env = rtspClient->envir(); // alias
+			StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-		char* const sdpDescription = resultString;
-		env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";
+			if (resultCode != 0)
+				{
+					LOG_WARN << *rtspClient << "Failed to get a SDP description: " << resultString;
+					delete[] resultString;
+					break;
+				}
 
-		// Create a media session object from this SDP description:
-		scs.session = MediaSession::createNew(env, sdpDescription);
-		delete[] sdpDescription; // because we don't need it anymore
-		if (scs.session == NULL)
-		{
-			env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
-			break;
-		}
-		else if (!scs.session->hasSubsessions())
-		{
-			env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
-			break;
-		}
+			char* const sdpDescription = resultString;
+			LOG_INFO << *rtspClient << "Got a SDP description:\n" << sdpDescription;
 
-		// Then, create and set up our data source objects for the session.  We do this by iterating over the session's 'subsessions',
-		// calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
-		// (Each 'subsession' will have its own data source.)
-		scs.iter = new MediaSubsessionIterator(*scs.session);
-		setupNextSubsession(rtspClient);
-		return;
-	} while (0);
+			// Create a media session object from this SDP description:
+			scs.session = MediaSession::createNew(env, sdpDescription);
+			delete[] sdpDescription; // because we don't need it anymore
+			if (scs.session == NULL)
+				{
+					LOG_ERROR << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg();
+					break;
+				}
+			else if (!scs.session->hasSubsessions())
+				{
+					LOG_WARN << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)";
+					break;
+				}
+
+			// Then, create and set up our data source objects for the session.  We do this by iterating over the session's 'subsessions',
+			// calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
+			// (Each 'subsession' will have its own data source.)
+			scs.iter = new MediaSubsessionIterator(*scs.session);
+			setupNextSubsession(rtspClient);
+			return;
+		}
+	while (0);
 
 	// An unrecoverable error occurred with this stream.
 	shutdownStream(rtspClient);
@@ -258,319 +268,365 @@
 
 void setupNextSubsession(RTSPClient* rtspClient)
 {
-  UsageEnvironment& env = rtspClient->envir(); // alias
-  StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
-  
-  scs.subsession = scs.iter->next();
-  if (scs.subsession != NULL) {
-    if (!scs.subsession->initiate()) {
-      env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
-      setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
-    } else {
-      env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
-      if (scs.subsession->rtcpIsMuxed()) {
-	env << "client port " << scs.subsession->clientPortNum();
-      } else {
-	env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
-      }
-      env << ")\n";
+	UsageEnvironment& env = rtspClient->envir(); // alias
+	StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-      // Continue setting up this subsession, by sending a RTSP "SETUP" command:
-      rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
-    }
-    return;
-  }
+	scs.subsession = scs.iter->next();
+	if (scs.subsession != NULL)
+		{
+			if (!scs.subsession->initiate())
+				{
+					LOG_ERROR << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg();
+					setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
+				}
+			else
+				{
+					if (scs.subsession->rtcpIsMuxed())
+						LOG_INFO << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (client port " << scs.subsession->clientPortNum() << ")";
+					else
+						LOG_INFO << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")";
 
-  // We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
-  if (scs.session->absStartTime() != NULL) {
-    // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
-    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
-  } else {
-    scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
-    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
-  }
+					// Continue setting up this subsession, by sending a RTSP "SETUP" command:
+					rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
+				}
+			return;
+		}
+
+	// We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
+	if (scs.session->absStartTime() != NULL)
+		{
+			// Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
+			rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
+		}
+	else
+		{
+			scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
+			rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
+		}
 }
 
-void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
-  do {
-    UsageEnvironment& env = rtspClient->envir(); // alias
-    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString)
+{
+	do
+		{
+			UsageEnvironment& env = rtspClient->envir(); // alias
+			StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-    if (resultCode != 0) {
-      env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
-      break;
-    }
+			if (resultCode != 0)
+				{
+					LOG_ERROR << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString;
+					break;
+				}
 
-    env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
-    if (scs.subsession->rtcpIsMuxed()) {
-      env << "client port " << scs.subsession->clientPortNum();
-    } else {
-      env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
-    }
-    env << ")\n";
+			LOG_INFO << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
+			if (scs.subsession->rtcpIsMuxed())
+				{
+					LOG_INFO << "client port " << scs.subsession->clientPortNum();
+				}
+			else
+				{
+					LOG_INFO << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+				}
+			LOG_INFO << ")";
 
-    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
-    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
-    // after we've sent a RTSP "PLAY" command.)
+			// Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
+			// (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
+			// after we've sent a RTSP "PLAY" command.)
 
-    scs.subsession->sink = DummySink::createNew(env, ((ourRTSPClient*)rtspClient)->rtspConfig, 
-												*scs.subsession, rtspClient->url());
-      // perhaps use your own custom "MediaSink" subclass instead
-    if (scs.subsession->sink == NULL) {
-      env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
-	  << "\" subsession: " << env.getResultMsg() << "\n";
-      break;
-    }
+			scs.subsession->sink = DummySink::createNew(env, ((ourRTSPClient*)rtspClient)->rtspConfig,
+			                       *scs.subsession, rtspClient->url());
+			// perhaps use your own custom "MediaSink" subclass instead
+			if (scs.subsession->sink == NULL)
+				{
+					LOG_ERROR << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
+					    << "\" subsession: " << env.getResultMsg();
+					break;
+				}
 
-    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
-    scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession 
-    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
-				       subsessionAfterPlaying, scs.subsession);
-    // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
-    if (scs.subsession->rtcpInstance() != NULL) {
-      scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
-    }
-  } while (0);
-  delete[] resultString;
+			LOG_INFO << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession";
+			scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
+			scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
+			                                   subsessionAfterPlaying, scs.subsession);
+			// Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
+			if (scs.subsession->rtcpInstance() != NULL)
+				{
+					scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
+				}
+		}
+	while (0);
+	delete[] resultString;
 
-  // Set up the next subsession, if any:
-  setupNextSubsession(rtspClient);
+	// Set up the next subsession, if any:
+	setupNextSubsession(rtspClient);
 }
 
-void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
-  Boolean success = False;
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString)
+{
+	Boolean success = False;
 
-  do {
-    UsageEnvironment& env = rtspClient->envir(); // alias
-    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+	do
+		{
+			UsageEnvironment& env = rtspClient->envir(); // alias
+			StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
-    if (resultCode != 0) {
-      env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
-      break;
-    }
+			if (resultCode != 0)
+				{
+					LOG_ERROR << *rtspClient << "Failed to start playing session: " << resultString;
+					break;
+				}
 
-    // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
-    // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
-    // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
-    // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
-    if (scs.duration > 0) {
-      unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
-      scs.duration += delaySlop;
-      unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
-      scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
-    }
+			// Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
+			// using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
+			// 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
+			// (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
+			if (scs.duration > 0)
+				{
+					unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
+					scs.duration += delaySlop;
+					unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
+					scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
+				}
 
-    env << *rtspClient << "Started playing session";
-    if (scs.duration > 0) {
-      env << " (for up to " << scs.duration << " seconds)";
-    }
-    env << "...\n";
+			LOG_INFO << *rtspClient << "Started playing session";
+			if (scs.duration > 0)
+				{
+					LOG_INFO << " (for up to " << scs.duration << " seconds)";
+				}
+			LOG_INFO << "...";
 
-    success = True;
-  } while (0);
-  delete[] resultString;
+			success = True;
+		}
+	while (0);
+	delete[] resultString;
 
-  if (!success) {
-    // An unrecoverable error occurred with this stream.
-    shutdownStream(rtspClient);
-  }
+	if (!success)
+		{
+			// An unrecoverable error occurred with this stream.
+			shutdownStream(rtspClient);
+		}
 }
 
 
 // Implementation of the other event handlers:
 
-void subsessionAfterPlaying(void* clientData) {
-  MediaSubsession* subsession = (MediaSubsession*)clientData;
-  RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);
+void subsessionAfterPlaying(void* clientData)
+{
+	MediaSubsession* subsession = (MediaSubsession*)clientData;
+	RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);
 
-  // Begin by closing this subsession's stream:
-  Medium::close(subsession->sink);
-  subsession->sink = NULL;
-
-  // Next, check whether *all* subsessions' streams have now been closed:
-  MediaSession& session = subsession->parentSession();
-  MediaSubsessionIterator iter(session);
-  while ((subsession = iter.next()) != NULL) {
-    if (subsession->sink != NULL) return; // this subsession is still active
-  }
-
-  // All subsessions' streams have now been closed, so shutdown the client:
-  shutdownStream(rtspClient);
-}
-
-void subsessionByeHandler(void* clientData) {
-  MediaSubsession* subsession = (MediaSubsession*)clientData;
-  RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
-  UsageEnvironment& env = rtspClient->envir(); // alias
-
-  env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n";
-
-  // Now act as if the subsession had closed:
-  subsessionAfterPlaying(subsession);
-}
-
-void streamTimerHandler(void* clientData) {
-  ourRTSPClient* rtspClient = (ourRTSPClient*)clientData;
-  StreamClientState& scs = rtspClient->scs; // alias
-
-  scs.streamTimerTask = NULL;
-
-  // Shut down the stream:
-  shutdownStream(rtspClient);
-}
-
-void shutdownStream(RTSPClient* rtspClient, int exitCode) {
-  UsageEnvironment& env = rtspClient->envir(); // alias
-  StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
-
-  // First, check whether any subsessions have still to be closed:
-  if (scs.session != NULL) { 
-    Boolean someSubsessionsWereActive = False;
-    MediaSubsessionIterator iter(*scs.session);
-    MediaSubsession* subsession;
-
-    while ((subsession = iter.next()) != NULL) {
-      if (subsession->sink != NULL) {
+	// Begin by closing this subsession's stream:
 	Medium::close(subsession->sink);
 	subsession->sink = NULL;
 
-	if (subsession->rtcpInstance() != NULL) {
-	  subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
-	}
+	// Next, check whether *all* subsessions' streams have now been closed:
+	MediaSession& session = subsession->parentSession();
+	MediaSubsessionIterator iter(session);
+	while ((subsession = iter.next()) != NULL)
+		{
+			if (subsession->sink != NULL) return; // this subsession is still active
+		}
 
-	someSubsessionsWereActive = True;
-      }
-    }
+	// All subsessions' streams have now been closed, so shutdown the client:
+	shutdownStream(rtspClient);
+}
 
-    if (someSubsessionsWereActive) {
-      // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
-      // Don't bother handling the response to the "TEARDOWN".
-      rtspClient->sendTeardownCommand(*scs.session, NULL);
-    }
-  }
+void subsessionByeHandler(void* clientData)
+{
+	MediaSubsession* subsession = (MediaSubsession*)clientData;
+	RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
+	UsageEnvironment& env = rtspClient->envir(); // alias
 
-  env << *rtspClient << "Closing the stream.\n";
-  Medium::close(rtspClient);
-    // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
+	LOG_INFO << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession";
 
-  if (--rtspClientCount == 0) {
-    // The final stream has ended, so exit the application now.
-    // (Of course, if you're embedding this code into your own application, you might want to comment this out,
-    // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
-    exit(exitCode);
-  }
+	// Now act as if the subsession had closed:
+	subsessionAfterPlaying(subsession);
+}
+
+void streamTimerHandler(void* clientData)
+{
+	ourRTSPClient* rtspClient = (ourRTSPClient*)clientData;
+	StreamClientState& scs = rtspClient->scs; // alias
+
+	scs.streamTimerTask = NULL;
+
+	// Shut down the stream:
+	shutdownStream(rtspClient);
+}
+
+void shutdownStream(RTSPClient* rtspClient, int exitCode)
+{
+	UsageEnvironment& env = rtspClient->envir(); // alias
+	StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+	// First, check whether any subsessions have still to be closed:
+	if (scs.session != NULL)
+		{
+			Boolean someSubsessionsWereActive = False;
+			MediaSubsessionIterator iter(*scs.session);
+			MediaSubsession* subsession;
+
+			while ((subsession = iter.next()) != NULL)
+				{
+					if (subsession->sink != NULL)
+						{
+							Medium::close(subsession->sink);
+							subsession->sink = NULL;
+
+							if (subsession->rtcpInstance() != NULL)
+								{
+									subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
+								}
+
+							someSubsessionsWereActive = True;
+						}
+				}
+
+			if (someSubsessionsWereActive)
+				{
+					// Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
+					// Don't bother handling the response to the "TEARDOWN".
+					rtspClient->sendTeardownCommand(*scs.session, NULL);
+				}
+		}
+
+	LOG_NOTICE << *rtspClient << "Closing the stream.";
+	Medium::close(rtspClient);
+	// Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
+
+	if (--rtspClientCount == 0)
+		{
+			// The final stream has ended, so exit the application now.
+			// (Of course, if you're embedding this code into your own application, you might want to comment this out,
+			// and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
+			exit(exitCode);
+		}
 }
 
 
 // Implementation of "ourRTSPClient":
 
-ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
+ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
 {
-  return new ourRTSPClient(env, _rtspConfig);
+	return new ourRTSPClient(env, _rtspConfig);
 }
 
-ourRTSPClient::ourRTSPClient(UsageEnvironment& env, const RTSPConfig& _rtspConfig)
-  : RTSPClient(env, _rtspConfig.rtspURL.c_str(), _rtspConfig.verbosityLevel, _rtspConfig.progName.c_str(), 
-				_rtspConfig.tunnelOverHTTPPortNum, -1), rtspConfig(_rtspConfig)
+ourRTSPClient::ourRTSPClient(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig)
+	: RTSPClient(env, _rtspConfig.rtspURL.c_str(), _rtspConfig.verbosityLevel, _rtspConfig.progName.c_str(),
+	             _rtspConfig.tunnelOverHTTPPortNum, -1), rtspConfig(_rtspConfig)
 {
 }
 
-ourRTSPClient::~ourRTSPClient() {
+ourRTSPClient::~ourRTSPClient()
+{
 }
 
 
 // Implementation of "StreamClientState":
 
 StreamClientState::StreamClientState()
-  : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
+	: iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0)
+{
 }
 
-StreamClientState::~StreamClientState() {
-  delete iter;
-  if (session != NULL) {
-    // We also need to delete "session", and unschedule "streamTimerTask" (if set)
-    UsageEnvironment& env = session->envir(); // alias
+StreamClientState::~StreamClientState()
+{
+	delete iter;
+	if (session != NULL)
+		{
+			// We also need to delete "session", and unschedule "streamTimerTask" (if set)
+			UsageEnvironment& env = session->envir(); // alias
 
-    env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
-    Medium::close(session);
-  }
+			env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
+			Medium::close(session);
+		}
 }
 
 // Implementation of "DummySink":
 
-DummySink* DummySink::createNew(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId)
+DummySink* DummySink::createNew(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId)
 {
-  return new DummySink(env, _rtspConfig, subsession, streamId);
+	return new DummySink(env, _rtspConfig, subsession, streamId);
 }
 
-DummySink::DummySink(UsageEnvironment& env, const RTSPConfig& _rtspConfig, MediaSubsession& subsession, char const* streamId)
-  : MediaSink(env), rtspConfig(_rtspConfig), fSubsession(subsession)
+DummySink::DummySink(UsageEnvironment& env, const PL_RTSPClient_Config& _rtspConfig, MediaSubsession& subsession, char const* streamId)
+	: MediaSink(env), rtspConfig(_rtspConfig), fSubsession(subsession)
 {
 	fStreamId = strDup(streamId);
 	fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
 
 	// ffmpeg needs the AUX header
 	if (rtspConfig.aux)
-	{
-		fReceiveBuffer[0]=0x00; fReceiveBuffer[1]=0x00; fReceiveBuffer[2]=0x00; fReceiveBuffer[3]=0x01;
-	}
+		{
+			fReceiveBuffer[0]=0x00;
+			fReceiveBuffer[1]=0x00;
+			fReceiveBuffer[2]=0x00;
+			fReceiveBuffer[3]=0x01;
+		}
 
 	//parse sdp
 	const char* strSDP = fSubsession.savedSDPLines();
 	rtsp_client_sdp_callback(rtspConfig.args, strSDP);
-	
+
 	const char* strFmtp = fSubsession.fmtp_spropparametersets();
 	rtsp_client_fmtp_callback(rtspConfig.args, strFmtp);
 	//std::cout << strFmtp << std::endl;
 }
 
-DummySink::~DummySink() {
-  delete[] fReceiveBuffer;
-  delete[] fStreamId;
+DummySink::~DummySink()
+{
+	delete[] fReceiveBuffer;
+	delete[] fStreamId;
 }
 
 void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
-				  struct timeval presentationTime, unsigned durationInMicroseconds)
+                                  struct timeval presentationTime, unsigned durationInMicroseconds)
 {
 	DummySink* sink = (DummySink*)clientData;
 
 	if (frameSize > 0)
-	{
-		unsigned s = frameSize;
-		if (sink->rtspConfig.aux)
-			s += 4;
-		rtsp_client_frame_callback(sink->rtspConfig.args, sink->fReceiveBuffer, s, presentationTime);
-	}
-  
+		{
+			unsigned s = frameSize;
+			if (sink->rtspConfig.aux)
+				s += 4;
+			rtsp_client_frame_callback(sink->rtspConfig.args, sink->fReceiveBuffer, s, presentationTime);
+		}
+
 	sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
 }
 
 void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
-				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
-  // We've just received a frame of data.  (Optionally) print out information about it:
+                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/)
+{
+	// We've just received a frame of data.  (Optionally) print out information about it:
 #ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
-  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
-  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
-  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
-  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
-  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
-  envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
-  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
-    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
-  }
+	if (fStreamId != NULL)
+		LOG_DEBUG << "Stream \"" << fStreamId << "\"; ";
+	LOG_DEBUG << "\t" << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
+	if (numTruncatedBytes > 0)
+		LOG_DEBUG << " (with " << numTruncatedBytes << " bytes truncated)";
+	
+	char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
+	sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
+	LOG_DEBUG << "\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
+	if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP())
+		{
+			LOG_DEBUG << "\tPTS not RTCP-synchronized"; // this presentation time is not RTCP-synchronized
+		}
 #ifdef DEBUG_PRINT_NPT
-  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
+	LOG_DEBUG << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
 #endif
-  envir() << "\n";
 #endif
-  
-  // Then continue, to request the next frame of data:
-  continuePlaying();
+
+	// Then continue, to request the next frame of data:
+	continuePlaying();
 }
 
 Boolean DummySink::continuePlaying()
 {
-	if (fSource == NULL) return False; // sanity check (should not happen)
+	if (fSource == NULL)
+		return False; // sanity check (should not happen)
 
 	rtsp_client_continue_callback(rtspConfig.args);
 
@@ -580,8 +636,8 @@
 
 	// Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
 	fSource->getNextFrame(b, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
-							afterGettingFrame, this,
-							onSourceClosure, this);
+	                      afterGettingFrame, this,
+	                      onSourceClosure, this);
 
 	return True;
 }
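
Reviewer note on the `aux` flag used throughout this file: live555 delivers bare H.264 NAL units, while ffmpeg's decoder expects Annex-B framing, i.e. each NAL prefixed with the start code `00 00 00 01`. Below is a minimal sketch of that contract with assumed names and buffer size (the real code uses `fReceiveBuffer` and `DUMMY_SINK_RECEIVE_BUFFER_SIZE`); it mirrors the constructor write and the `s += 4` in `afterGettingFrame` above, not an exact extract of the patch:

```cpp
#include <cstddef>
#include <cstdint>

static const size_t kRecvBufSize = 100000; // stand-in for DUMMY_SINK_RECEIVE_BUFFER_SIZE

struct AuxBuffer
{
	uint8_t buf[kRecvBufSize];
	bool aux;

	explicit AuxBuffer(bool useAux) : aux(useAux)
	{
		// Written once, as in the DummySink constructor; live555 is then
		// pointed past these 4 bytes, so every frame handed to the
		// callback already carries the start code.
		if (aux) { buf[0] = 0x00; buf[1] = 0x00; buf[2] = 0x00; buf[3] = 0x01; }
	}

	// Where getNextFrame should write when aux is on.
	uint8_t* writePtr() { return aux ? buf + 4 : buf; }

	// What the frame callback reports, matching the "s += 4" above.
	size_t deliveredSize(size_t frameSize) const { return aux ? frameSize + 4 : frameSize; }
};
```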
diff --git a/RtspFace/logger.h b/RtspFace/logger.h
index fe50d00..5d7b344 100644
--- a/RtspFace/logger.h
+++ b/RtspFace/logger.h
@@ -20,6 +20,12 @@
 #define LOG(__level)  log4cpp::Category::getRoot() << log4cpp::Priority::__level << __FILE__ << ":" << __LINE__ << "\t" 
 #define LOGP(__level, __format, arg...) log4cpp::Category::getRoot().log(log4cpp::Priority::__level, "%s:%d\t" __format, __FILE__, __LINE__, ##arg);
 
+#define LOG_DEBUG    LOG(DEBUG) // Debug messages; of no interest in any production environment
+#define LOG_INFO     LOG(INFO) // Not a significant event, but useful when diagnosing problems online
+#define LOG_NOTICE   LOG(NOTICE) // Important event
+#define LOG_WARN     LOG(WARN) // Important event or input that may lead to errors
+#define LOG_ERROR    LOG(ERROR) // Error: the program is running in an abnormal (unexpected) way
+
 inline void initLogger(int verbose)
 {
 	// initialize log4cpp
@@ -42,7 +48,7 @@
 		default: log.setPriority(log4cpp::Priority::NOTICE); break;
 		
 	}
-	LOG(INFO) << "level:" << log4cpp::Priority::getPriorityName(log.getPriority()); 
+	LOG_INFO << "level:" << log4cpp::Priority::getPriorityName(log.getPriority()); 
 }
 	
 #endif
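
The new severity-named wrappers keep the streaming style of the underlying `LOG(__level)` macro. A minimal usage sketch (the `verbose` value is an assumption here; 0 is taken to fall through to the switch's default NOTICE branch shown above):

```cpp
#include "logger.h"

int main()
{
	initLogger(0); // assumed: falls through to the default (NOTICE) priority
	LOG_INFO  << "stream opened";                          // expands to LOG(INFO) << ...
	LOG_WARN  << "frame truncated by " << 12 << " bytes";
	LOG_ERROR << "decoder returned an unexpected state";
	return 0;
}
```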
diff --git a/RtspFace/main.cpp b/RtspFace/main.cpp
index 2e9ae59..8b87ae0 100644
--- a/RtspFace/main.cpp
+++ b/RtspFace/main.cpp
@@ -28,7 +28,7 @@
 	
 	{
 		PL_RTSPClient* rtspClient = (PL_RTSPClient*)pipeLine.push_elem("PL_RTSPClient");
-		RTSPConfig rtspConfig;
+		PL_RTSPClient_Config rtspConfig;
 		rtspConfig.progName = argv[0];
 		rtspConfig.rtspURL = argv[1];
 		rtspConfig.aux = true; // ffmpeg needs aux, but live555 does not
@@ -38,7 +38,7 @@
 		bool ret = rtspClient->init(&rtspConfig);
 		if (!ret)
 		{
-			LOG(ERROR) << "rtspClient.init error";
+			LOG_ERROR << "rtspClient.init error";
 			exit(EXIT_FAILURE);
 		}
 	}
@@ -65,7 +65,7 @@
 	//	bool ret = queue1->init(&config);
 	//	if (!ret)
 	//	{
-	//		LOG(ERROR) << "queue1.init error";
+	//		LOG_ERROR << "queue1.init error";
 	//		exit(EXIT_FAILURE);
 	//	}
 	//}
@@ -76,20 +76,19 @@
 	}
 	
 	{
-		RTSPServerConfig config;
 		PL_RTSPServer* rtspServer = (PL_RTSPServer*)pipeLine.push_elem("PL_RTSPServer");
-		bool ret = rtspServer->init(&config);
+		bool ret = rtspServer->init(nullptr);
 		if (!ret)
 		{
-			LOG(ERROR) << "rtspServer.init error";
+			LOG_ERROR << "rtspServer.init error";
 			exit(EXIT_FAILURE);
 		}
 	}
 	
 	while(true)
 	{
-		//LOG(ERROR) << "begin pipe";
+		//LOG_ERROR << "begin pipe";
 		pipeLine.pipe();
-		//LOG(ERROR) << "end pipe";
+		//LOG_ERROR << "end pipe";
 	}
 }
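
Every element in main.cpp follows the same `push_elem` + `init` + error-check pattern, and this patch makes `init(nullptr)` the convention for config-less elements such as PL_RTSPServer. A self-contained sketch of that contract (PipeLine and the element type below are stubs for illustration, not the repo's real classes):

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

struct PipeLineElem
{
	virtual bool init(void* config) = 0; // nullptr allowed when no config is needed
	virtual ~PipeLineElem() {}
};

struct StubServer : PipeLineElem
{
	bool init(void* /*config*/) override { return true; }
};

struct PipeLine
{
	PipeLineElem* push_elem(const std::string& /*type*/) { return new StubServer; }
};

int main()
{
	PipeLine pipeLine;
	PipeLineElem* rtspServer = pipeLine.push_elem("PL_RTSPServer");
	if (!rtspServer->init(nullptr))
	{
		std::cerr << "rtspServer.init error" << std::endl; // LOG_ERROR in the repo
		std::exit(EXIT_FAILURE);
	}
	return 0;
}
```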

--
Gitblit v1.8.0