From 9d9849175b11b3ba9918ad4f980aa4a1c7c2afb0 Mon Sep 17 00:00:00 2001
From: houxiao <houxiao@454eff88-639b-444f-9e54-f578c98de674>
Date: Thu, 22 Dec 2016 14:39:50 +0800
Subject: [PATCH] add pipeline

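Replace the embedded FFmpeg H.264 decoder with pipeline callbacks:
RTSPClient.cpp becomes the header RTSPClient.hpp, main() is renamed
test_main(), and openURL() gains a void* args that is threaded through
ourRTSPClient and DummySink to the rtsp_client_*_callback() hooks.
Received frames are prefixed with a 4-byte Annex-B start code before
being handed to the frame callback.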
---
 RtspFace/RTSPClient.hpp |  271 +++++++++++------------------------------------------
 1 file changed, 58 insertions(+), 213 deletions(-)
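
Note: the rtsp_client_*_callback() functions invoked below are declared
elsewhere in the pipeline, not in this diff. Their signatures are therefore
assumptions inferred from the call sites in the hunks that follow; a
plausible sketch:

    // Hypothetical declarations (not part of this patch); parameter types
    // are inferred from how each callback is invoked in RTSPClient.hpp.
    void rtsp_client_sdp_callback(void* args, const char* sdp);   // full SDP text
    void rtsp_client_fmtp_callback(void* args, const char* fmtp); // sprop-parameter-sets (SPS/PPS)
    void rtsp_client_frame_callback(void* args, uint8_t* buffer, unsigned bufferSize); // start code + NALU
    void rtsp_client_continue_callback(void* args);               // fires before each getNextFrame()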

diff --git a/RtspFace/RTSPClient.cpp b/RtspFace/RTSPClient.hpp
similarity index 74%
rename from RtspFace/RTSPClient.cpp
rename to RtspFace/RTSPClient.hpp
index a306d89..78088ef 100644
--- a/RtspFace/RTSPClient.cpp
+++ b/RtspFace/RTSPClient.hpp
@@ -24,10 +24,6 @@
 #include "BasicUsageEnvironment.hh"
 
 #include <iostream>
-#include <libbase64.h>
-
-bool initH264DecoderEnv(uint8_t* sps, size_t spsSize, uint8_t* pps, size_t ppsSize);
-int decodeH264(uint8_t* pBuffer, int dwBufsize, const char *outfile)  ;
 
 // Forward function definitions:
 
@@ -43,7 +39,7 @@
   // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
 
 // The main streaming routine (for each "rtsp://" URL):
-void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
+void openURL(UsageEnvironment& env, void* args, char const* progName, char const* rtspURL);
 
 // Used to iterate through each stream's 'subsessions', setting up each one:
 void setupNextSubsession(RTSPClient* rtspClient);
@@ -68,7 +64,7 @@
 
 char eventLoopWatchVariable = 0;
 
-int main(int argc, char** argv) {
+int test_main(int argc, char** argv) {
   // Begin by setting up our usage environment:
   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
   UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
@@ -81,7 +77,7 @@
 
   // There are argc-1 URLs: argv[1] through argv[argc-1].  Open and start streaming each one:
   for (int i = 1; i <= argc-1; ++i) {
-    openURL(*env, argv[0], argv[i]);
+    openURL(*env, NULL, argv[0], argv[i]);
   }
 
   // All subsequent activity takes place within the event loop:
@@ -134,6 +130,7 @@
 
 public:
   StreamClientState scs;
+  void* args;
 };
 
 // Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
@@ -141,39 +138,44 @@
 // Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application).
 // In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it.
 
-class DummySink: public MediaSink {
+class DummySink: public MediaSink
+{
 public:
-  static DummySink* createNew(UsageEnvironment& env,
-			      MediaSubsession& subsession, // identifies the kind of data that's being received
-			      char const* streamId = NULL); // identifies the stream itself (optional)
+	static DummySink* createNew(UsageEnvironment& env,
+				void* _args,
+				MediaSubsession& subsession, // identifies the kind of data that's being received
+				char const* streamId = NULL); // identifies the stream itself (optional)
 
 private:
-  DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId);
-    // called only by "createNew()"
-  virtual ~DummySink();
+	DummySink(UsageEnvironment& env, void* _args, MediaSubsession& subsession, char const* streamId);
+	// called only by "createNew()"
+	virtual ~DummySink();
 
-  static void afterGettingFrame(void* clientData, unsigned frameSize,
-                                unsigned numTruncatedBytes,
+	static void afterGettingFrame(void* clientData, unsigned frameSize,
+								unsigned numTruncatedBytes,
 				struct timeval presentationTime,
-                                unsigned durationInMicroseconds);
-  void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+								unsigned durationInMicroseconds);
+	void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
 			 struct timeval presentationTime, unsigned durationInMicroseconds);
 
-private:
-  // redefined virtual functions:
-  virtual Boolean continuePlaying();
+public:
+	void* args;
 
 private:
-  u_int8_t* fReceiveBuffer;
-  MediaSubsession& fSubsession;
-  char* fStreamId;
+	// redefined virtual functions:
+	virtual Boolean continuePlaying();
+
+private:
+	u_int8_t* fReceiveBuffer;
+	MediaSubsession& fSubsession;
+	char* fStreamId;
 };
 
 #define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient"
 
 static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
 
-void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) {
+void openURL(UsageEnvironment& env, void* args, char const* progName, char const* rtspURL) {
   // Begin by creating a "RTSPClient" object.  Note that there is a separate "RTSPClient" object for each stream that we wish
  // to receive (even if more than one stream uses the same "rtsp://" URL).
   RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
@@ -181,6 +183,8 @@
     env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
     return;
   }
+  
+  ((ourRTSPClient*)rtspClient)->args = args;
 
   ++rtspClientCount;
 
@@ -290,7 +294,9 @@
     // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
     // after we've sent a RTSP "PLAY" command.)
 
-    scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());
+    scs.subsession->sink = DummySink::createNew(env,
+			((ourRTSPClient*)rtspClient)->args,
+			*scs.subsession, rtspClient->url());
       // perhaps use your own custom "MediaSink" subclass instead
     if (scs.subsession->sink == NULL) {
       env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
@@ -447,7 +453,9 @@
 
 ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
 			     int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum)
-  : RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
+  : RTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1),
+	args(nullptr)
+{
 }
 
 ourRTSPClient::~ourRTSPClient() {
@@ -471,70 +479,33 @@
   }
 }
 
-SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr,  
-                                     // result parameter:  
-                                     size_t& numSPropRecords) {  
-  // Make a copy of the input string, so we can replace the commas with '\0's:  
-  char* inStr = strDup(sPropParameterSetsStr);  
-  if (inStr == NULL) {  
-    numSPropRecords = 0;  
-    return NULL;  
-  }  
-  
-  // Count the number of commas (and thus the number of parameter sets):  
-  numSPropRecords = 1;  
-  char* s;  
-  for (s = inStr; *s != '\0'; ++s) {  
-    if (*s == ',') {  
-      ++numSPropRecords;  
-      *s = '\0';  
-    }  
-  }  
-  
-  // Allocate and fill in the result array:  
-  SPropRecord* resultArray = new SPropRecord[numSPropRecords]; //****** read to here ******/
-  s = inStr;  
-  for (unsigned i = 0; i < numSPropRecords; ++i) {  
-    resultArray[i].sPropBytes = new uint8_t[256];
-	
-	size_t sPropLength = 0;
-	base64_decode(s, strlen(s), (char*)resultArray[i].sPropBytes, &sPropLength, 0);
-	resultArray[i].sPropLength = sPropLength;
-	
-    s += strlen(s) + 1;  
-  }  
-  
-  delete[] inStr;  
-  return resultArray;  
-}
-
 // Implementation of "DummySink":
 
 // Even though we're not going to be doing anything with the incoming data, we still need to receive it.
 // Define the size of the buffer that we'll use:
-#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
+#define DUMMY_SINK_RECEIVE_BUFFER_SIZE (1920*1080*3)
 
-DummySink* DummySink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
-  return new DummySink(env, subsession, streamId);
+DummySink* DummySink::createNew(UsageEnvironment& env, void* _args, MediaSubsession& subsession, char const* streamId)
+{
+  return new DummySink(env, _args, subsession, streamId);
 }
 
-DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
-  : MediaSink(env),
-    fSubsession(subsession) {
-  fStreamId = strDup(streamId);
-  fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
-  
-  //parse sdp
-  //const char* strSDP = fSubsession.savedSDPLines();
-  const char* strFmtp = fSubsession.fmtp_spropparametersets();
-  //std::cout << strFmtp << std::endl;
-  
-  size_t numSPropRecords = 0;
-  SPropRecord *p_record = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSPropRecords);
-	SPropRecord &sps = p_record[0];  
-	SPropRecord &pps = p_record[1];
+DummySink::DummySink(UsageEnvironment& env, void* _args, MediaSubsession& subsession, char const* streamId)
+  : MediaSink(env), args(_args), fSubsession(subsession)
+{
+	fStreamId = strDup(streamId);
+	fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
+
+	// ffmpeg needs an Annex-B start code (00 00 00 01) at the head of each frame buffer
+	fReceiveBuffer[0]=0x00; fReceiveBuffer[1]=0x00; fReceiveBuffer[2]=0x00; fReceiveBuffer[3]=0x01;
+
+	//parse sdp
+	const char* strSDP = fSubsession.savedSDPLines();
+	rtsp_client_sdp_callback(args, strSDP);
 	
-	initH264DecoderEnv(sps.sPropBytes, sps.sPropLength, pps.sPropBytes, pps.sPropLength);
+	const char* strFmtp = fSubsession.fmtp_spropparametersets();
+	rtsp_client_fmtp_callback(args, strFmtp);
+	//std::cout << strFmtp << std::endl;
 }
 
 DummySink::~DummySink() {
@@ -547,7 +518,7 @@
   DummySink* sink = (DummySink*)clientData;
 
   if (frameSize > 0)
-	decodeH264(sink->fReceiveBuffer, frameSize, NULL);
+	rtsp_client_frame_callback(sink->args, sink->fReceiveBuffer, frameSize + 4);
   
   sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
 }
@@ -581,137 +552,11 @@
 Boolean DummySink::continuePlaying() {
   if (fSource == NULL) return False; // sanity check (should not happen)
 
+  rtsp_client_continue_callback(args);
+
   // Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
-  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
+  fSource->getNextFrame(fReceiveBuffer + 4, DUMMY_SINK_RECEIVE_BUFFER_SIZE - 4, // leave room for the 4-byte start code
                         afterGettingFrame, this,
                         onSourceClosure, this);
   return True;
-}
-
-
-/*********
-
-*********/
-
-extern "C"
-{
-	#include <libavcodec/avcodec.h>
-	#include <libavutil/frame.h>
-	#include <libavformat/avformat.h>
-}
-
-AVCodecContext* g_pAVCodecContext  = NULL;
-AVFrame* g_pAVFrame = NULL;
-
-bool initH264DecoderEnv(uint8_t* sps, size_t spsSize, uint8_t* pps, size_t ppsSize)
-{
-	av_register_all();
-
-	// find the video encoder
-	AVCodec* avCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
-
-	if (!avCodec)   
-	{  
-		printf("codec not found!\n");  
-		return -1;  
-	}  
-
-	g_pAVCodecContext = avcodec_alloc_context3(avCodec);
-
-	//鍒濆鍖栧弬鏁帮紝涓嬮潰鐨勫弬鏁板簲璇ョ敱鍏蜂綋鐨勪笟鍔″喅瀹�
-	g_pAVCodecContext->time_base.num = 1;
-	g_pAVCodecContext->frame_number = 1; // one video frame per packet
-	g_pAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
-	g_pAVCodecContext->bit_rate = 0;
-	g_pAVCodecContext->time_base.den = 25;
-	g_pAVCodecContext->width = 1920;
-	g_pAVCodecContext->height = 1080;
-
-	if (g_pAVCodecContext->extradata == NULL)  
-    {  
-        int totalsize = 0;  
-        unsigned char* tmp = NULL;  
-        unsigned char nalu_header[4] = { 0, 0, 0, 1 };  
-  
-        totalsize = 8 + spsSize + ppsSize;  
-  
-        tmp = new unsigned char[totalsize];  
-        memcpy(tmp, nalu_header, 4);  
-        memcpy(tmp + 4, sps, spsSize);  
-        memcpy(tmp + 4 + spsSize, nalu_header, 4);  
-        memcpy(tmp + 4 + spsSize + 4, pps, ppsSize);  
-  
-        g_pAVCodecContext->extradata_size = totalsize; // g_pAVCodecContext is the context used when decoding
-  
-        g_pAVCodecContext->extradata = tmp;  
-    }
-	
-	if(avcodec_open2(g_pAVCodecContext, avCodec, NULL) >= 0)
-		g_pAVFrame = av_frame_alloc();// Allocate video frame
-	else
-		return false;
-	
-	return true;
-}
-
-int decodeH264(uint8_t* pBuffer, int dwBufsize, const char *outfile)  
-{
-	AVPacket packet = {0};
-	int frameFinished = dwBufsize; // arbitrary initial value; it has no effect
-
-	uint8_t newBuff[dwBufsize+4];
-	newBuff[0]=0x00; newBuff[1]=0x00; newBuff[2]=0x00; newBuff[3]=0x01;
-	memcpy(newBuff + 4, pBuffer, dwBufsize);
-	
-	//packet.data = pBuffer; // point this at a complete H264 frame
-	//packet.size = dwBufsize; // set this to the size of the H264 frame
-
-	if (av_packet_from_data(&packet, newBuff, dwBufsize + 4) != 0){  
-		printf("exchange data failed!\n");
-	}  
-	
-	// the real decoding starts below
-	avcodec_decode_video2(g_pAVCodecContext, g_pAVFrame, &frameFinished, &packet);
-	if(frameFinished) // decoded successfully
-	{
-		int picSize = g_pAVCodecContext->height * g_pAVCodecContext->width;
-		int newSize = picSize * 1.5;
-
-		// allocate memory
-		uint8_t *buff = new uint8_t[newSize];
-
-		int height = g_pAVFrame->height;
-		int width = g_pAVFrame->width;
-
-		// write out the decoded planes
-		int a=0;
-		for (int i=0; i<height; i++)
-		{
-			memcpy(buff+a,g_pAVFrame->data[0] + i * g_pAVFrame->linesize[0], width);
-			a+=width;
-		}
-		for (int i=0; i<height/2; i++)
-		{
-			memcpy(buff+a,g_pAVFrame->data[1] + i * g_pAVFrame->linesize[1], width/2);
-			a+=width/2;
-		}
-		for (int i=0; i<height/2; i++)
-		{
-			memcpy(buff+a,g_pAVFrame->data[2] + i * g_pAVFrame->linesize[2], width/2);
-			a+=width/2;
-		}
-
-		// buff ready
-
-		//static size_t f=0;
-		//char fname[50];
-		//sprintf(fname, "%u.yuv420", ++f);
-		//FILE * pFile = fopen (fname,"wb");
-		//fwrite (buff , sizeof(char), newSize, pFile);
-		//fclose(pFile);
-		
-		delete[] buff;
-	}
-	else
-		printf("incomplete frame\n");
 }
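
Usage note: the void* args passed to openURL() is stored on ourRTSPClient,
copied into each DummySink, and handed back on every callback, so a caller
can route frames into its own pipeline object. A minimal sketch follows;
PipelineCtx and the callback body are illustrative assumptions, not part of
this patch:

    // Illustrative only; every name except openURL(), eventLoopWatchVariable,
    // and the callback itself is made up for this sketch.
    struct PipelineCtx { /* decoder / queue state goes here */ };

    void rtsp_client_frame_callback(void* args, uint8_t* buffer, unsigned bufferSize)
    {
        PipelineCtx* ctx = (PipelineCtx*)args;
        (void)ctx; // stub: a real pipeline would consume the frame here
        // buffer[0..3] holds the 00 00 00 01 start code written by the
        // DummySink constructor; buffer + 4 is the NALU itself, so the
        // whole bufferSize bytes can be fed to an Annex-B H.264 decoder as-is.
    }

    int main(int argc, char** argv)
    {
        TaskScheduler* scheduler = BasicTaskScheduler::createNew();
        UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
        PipelineCtx ctx;
        openURL(*env, &ctx, argv[0], argv[1]); // args reaches every callback
        env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
        return 0;
    }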

--
Gitblit v1.8.0