From ac3098578b55b8556a6e498a9ea1e936be588594 Mon Sep 17 00:00:00 2001
From: xuxiuxi <xuxiuxi@454eff88-639b-444f-9e54-f578c98de674>
Date: Wed, 12 Apr 2017 13:38:20 +0800
Subject: [PATCH] PL_H264Encoder: implement H.264 encoding with FFmpeg (libavcodec)
---
RtspFace/PL_H264Encoder.cpp | 365 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 337 insertions(+), 28 deletions(-)
diff --git a/RtspFace/PL_H264Encoder.cpp b/RtspFace/PL_H264Encoder.cpp
index ebdfb7f..68c502c 100644
--- a/RtspFace/PL_H264Encoder.cpp
+++ b/RtspFace/PL_H264Encoder.cpp
@@ -1,32 +1,78 @@
#include "PL_H264Encoder.h"
+#include "MaterialBuffer.h"
+#include "logger.h"
extern "C"
{
- #include <libyuv.h>
+ #include <libavcodec/avcodec.h>
+ #include <libavutil/frame.h>
+ #include <libavutil/imgutils.h>
+ #include <libavutil/opt.h>
+ #include <libavformat/avformat.h>
}
-struct PL_H264Encoder_Internal
-{
- uint8_t buffer[1920*1080*4];
- size_t buffSize;
- size_t buffSizeMax;
+#include <libyuv.h>
+PL_H264Encoder_Config::PL_H264Encoder_Config() :
+ inBufferSize(2*1024*1024), // 2MByte
+ resetPTS(false),
+ bytesBufferImageWidth(0), bytesBufferImageHeight(0),
+	avc_bit_rate(1*1024*1024*8), // 8 Mbit/s (1 MByte/s)
+ avc_fps(25), avc_gop(25), avc_max_b_frames(0), avc_profile(FF_PROFILE_H264_BASELINE),
+ av_opt_preset("superfast"), av_opt_tune(""), avc_profile_str("")
+{
+ // av_opt_tune: zerolatency
+}
+
+struct H264Encoder_Internal
+{
+ uint8_t* buffer;
+ size_t buffSize;
bool payError;
+ bool ffmpegInited;
+ size_t frameCount;
+ MB_Frame lastFrame;
+ PL_H264Encoder_Config config;
- PL_H264Encoder_Internal() :
- buffSize(0), buffSizeMax(sizeof(buffer)),
- payError(true)
+ AVCodecContext* pAVCodecContext;
+	AVFrame* pAVFrame; //#todo delete
+ AVFormatContext* pAVFormatContext;
+
+ H264Encoder_Internal() :
+ buffer(nullptr), buffSize(0),
+ payError(true), ffmpegInited(false), frameCount(0), lastFrame(), config(),
+ pAVCodecContext(nullptr), pAVFrame(nullptr), pAVFormatContext(nullptr)
{
}
- ~PL_H264Encoder_Internal()
+ ~H264Encoder_Internal()
{
+ delete[] buffer;
+ buffer = nullptr;
}
void reset()
{
buffSize = 0;
payError = true;
+ ffmpegInited = false;
+ frameCount = 0;
+
+ MB_Frame _lastFrame;
+ lastFrame = _lastFrame;
+
+ PL_H264Encoder_Config _config;
+ config = _config;
+
+ pAVCodecContext = nullptr;
+ pAVFrame = nullptr;
+ pAVFormatContext = nullptr;
+
+ if (buffer != nullptr)
+ {
+ delete[] buffer;
+ buffer = nullptr;
+ }
}
};
@@ -35,52 +81,315 @@
return new PL_H264Encoder;
}
-PL_H264Encoder::PL_H264Encoder() : internal(new PL_H264Encoder_Internal)
+PL_H264Encoder::PL_H264Encoder() : internal(new H264Encoder_Internal)
{
}
PL_H264Encoder::~PL_H264Encoder()
{
- delete (PL_H264Encoder_Internal*)internal;
+ delete (H264Encoder_Internal*)internal;
internal= nullptr;
}
bool PL_H264Encoder::init(void* args)
{
- PL_H264Encoder_Internal* in = (PL_H264Encoder_Internal*)internal;
+ H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
in->reset();
-
+
+ if (args != nullptr)
+ {
+ PL_H264Encoder_Config* config = (PL_H264Encoder_Config*)args;
+ in->config = *config;
+ }
+
+ in->buffer = new uint8_t[in->config.inBufferSize];
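+	// in->buffer holds one encoded access unit; config.inBufferSize (2 MByte by default)
+	// must be large enough for the biggest expected packet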
+
return true;
}
void PL_H264Encoder::finit()
{
- PL_H264Encoder_Internal* in = (PL_H264Encoder_Internal*)internal;
+ H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
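+	// #todo: flush the encoder (see the commented flush loop in encodeH264) and free
+	// pAVCodecContext / pAVFrame here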
+}
+
+bool initH264EncoderEnv(H264Encoder_Internal* in)
+{
+ av_register_all();
+
+ // find the video encoder
+ AVCodec* avCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
+
+ if (!avCodec)
+ {
+ LOG_ERROR << "codec not found!" << std::endl;
+ return false;
+ }
+
+ in->pAVCodecContext = avcodec_alloc_context3(avCodec);
+
+ in->pAVCodecContext->bit_rate = in->config.avc_bit_rate;
+ in->pAVCodecContext->width = in->config.bytesBufferImageWidth;
+ in->pAVCodecContext->height = in->config.bytesBufferImageHeight;
+ in->pAVCodecContext->time_base.num = 1;
+ in->pAVCodecContext->time_base.den = in->config.avc_fps;
+ in->pAVCodecContext->gop_size = in->config.avc_gop;
+ in->pAVCodecContext->max_b_frames = in->config.avc_max_b_frames;
+ in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
+
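+	// "preset", "tune" and "profile" are libx264 private options, so they are set
+	// through av_opt_set on priv_data; the numeric FF_PROFILE_* value is used only
+	// when no profile string is given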
+ if (!in->config.av_opt_preset.empty())
+ av_opt_set(in->pAVCodecContext->priv_data, "preset", in->config.av_opt_preset.c_str(), 0);
+ if (!in->config.av_opt_tune.empty())
+ av_opt_set(in->pAVCodecContext->priv_data, "tune", in->config.av_opt_tune.c_str(), 0);
+ if (!in->config.avc_profile_str.empty())
+ av_opt_set(in->pAVCodecContext->priv_data, "profile", in->config.avc_profile_str.c_str(), 0);
+ else
+ in->pAVCodecContext->profile = in->config.avc_profile;
+
+ if(avcodec_open2(in->pAVCodecContext, avCodec, NULL) >= 0)
+ {
+ in->pAVFrame = av_frame_alloc(); // Allocate video frame
+
+ in->pAVFrame->format = in->pAVCodecContext->pix_fmt;
+ in->pAVFrame->width = in->pAVCodecContext->width;
+ in->pAVFrame->height = in->pAVCodecContext->height;
+
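+		// av_image_alloc fills data[] / linesize[] with 16-byte aligned planes;
+		// copyAVFrame() later replaces the data[] pointers with the caller's planes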
+ int ret = av_image_alloc(in->pAVFrame->data, in->pAVFrame->linesize,
+ in->pAVCodecContext->width, in->pAVCodecContext->height,
+ in->pAVCodecContext->pix_fmt, 16);
+ if (ret < 0)
+ {
+ LOG_ERROR << "av_image_alloc error" << std::endl;
+ return false;
+ }
+ }
+ else
+ {
+ LOG_ERROR << "avcodec_open2 error" << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+void copyAVFrame(AVFrame* dest, AVFrame* src)
+{
+//#test
+//#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
+ //int src_width = src->width;
+ //int src_height = src->height;
+ //int dst_width = dest->width;
+ //int dst_height = dest->height;
+ //printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);
+ //
+ //libyuv::I420Scale(src->data[0], src_width,
+ // src->data[1], SUBSAMPLE(src_width, 2),
+ // src->data[2], SUBSAMPLE(src_width, 2),
+ // src_width, src_height,
+ // dest->data[0], dst_width,
+ // dest->data[1], SUBSAMPLE(dst_width, 2),
+ // dest->data[2], SUBSAMPLE(dst_width, 2),
+ // dst_width, dst_height,
+ // libyuv::kFilterNone );
+
+
+ //static size_t f=0;
+ //char fname[50];
+ //sprintf(fname, "%u.yuv420", ++f);
+ //FILE * pFile = fopen (fname,"wb");
+ //fwrite (dest->data[0] , sizeof(char), dst_width * dst_height, pFile);
+ //fwrite (dest->data[1] , sizeof(char), dst_width * dst_height / 4, pFile);
+ //fwrite (dest->data[2] , sizeof(char), dst_width * dst_height / 4, pFile);
+ //fclose(pFile);
+
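+	// shallow copy: only the plane pointers are taken over, no pixel data is copied
+	// (the planes allocated by av_image_alloc in initH264EncoderEnv are left untouched)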
+ dest->data[0] = src->data[0];
+ dest->data[1] = src->data[1];
+ dest->data[2] = src->data[2];
+
+ //int height = dest->height;
+ //int width = dest->width;
+ //memcpy(dest->data[0], src->data[0], height * width); // Y
+ //memcpy(dest->data[1], src->data[1], height * width / 4); // U
+ //memcpy(dest->data[2], src->data[2], height * width / 4); // V
+}
+
+bool encodeH264(H264Encoder_Internal* in, AVFrame* pAVFrame, timeval pts)
+{
+ in->buffSize = 0;
+
+ copyAVFrame(in->pAVFrame, pAVFrame);
+
+ //in->pAVFrame->pts = (1.0 / 25) * 90000 * in->frameCount;
+ //in->pAVFrame->pts = time(nullptr);
+ //in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000 + in->frameCount;
+ //in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000 + ((1.0 / 25) * 90000 * in->frameCount);
+ in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000;
+ //in->pAVFrame->pts = pAVFrame->pkt_pts;
+ //in->pAVFrame->pts = (1.0 / 25) * 90000 * in->frameCount;
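+	// the wall-clock pts from gettimeofday (tv_sec/tv_usec, in microseconds) is divided
+	// by 90000 here; the commented lines above record the other timestamp variants tried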
+
+ AVPacket pAVPacket = {0};
+ av_init_packet(&pAVPacket);
+
+ // encode the image
+ int gotPacket = 0;
+
+ int ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, in->pAVFrame, &gotPacket);
+ if (ret < 0)
+ {
+ LOG_WARN << "avcodec_encode_video2 (1) error=" << ret << std::endl;
+ return false;
+ }
+
+	if (gotPacket > 0)
+	{
+		in->frameCount++;
+		LOGP(DEBUG, "Succeeded encoding (1) frame=%zu, size=%d", in->frameCount, pAVPacket.size);
+		if ((size_t)pAVPacket.size > in->config.inBufferSize)
+		{
+			LOG_ERROR << "encoded packet larger than inBufferSize" << std::endl;
+			av_free_packet(&pAVPacket);
+			return false;
+		}
+		memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
+		in->buffSize = pAVPacket.size;
+		av_free_packet(&pAVPacket);
+	}
+
+ //#todo finit
+ //Flush Encoder, when stop encoder
+ //while (gotPacket > 0)
+ //{
+ // ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, NULL, &gotPacket);
+ // if (ret < 0)
+ // {
+ // printf("avcodec_encode_video2 (2) error=%d\n", ret);
+ // return false;
+ // }
+ // if (gotPacket > 0)
+ // {
+ // printf("Succeed to encode (2) frame=%d, size=%d\n", in->frameCount, pAVPacket.size);
+ // memcpy(in->buffer + in->buffSize, pAVPacket.data, pAVPacket.size);
+ // in->buffSize += pAVPacket.size;
+ // av_free_packet(&pAVPacket);
+ // }
+ //}
+
+
+ //#test
+ //if (in->buffSize > 0)
+ //{
+ // static FILE * pFile = fopen("out.h264","wba+");
+ // fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
+ // fflush(pFile);
+ //}
+
+ in->payError = (in->buffSize == 0);
+ return !(in->payError);
+}
+
+bool encodeH264(H264Encoder_Internal* in, uint8_t* buffer, timeval pts)
+{
+ uint16_t width = in->config.bytesBufferImageWidth;
+ uint16_t height = in->config.bytesBufferImageHeight;
+
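+	// wrap the packed YUV420P buffer in a temporary AVFrame:
+	// Y plane = width*height bytes, U and V planes = width*height/4 bytes each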
+	AVFrame avFrame = {};
+ avFrame.width = width;
+ avFrame.height = height;
+ avFrame.data[0] = buffer;
+ avFrame.data[1] = buffer + width*height;
+ avFrame.data[2] = buffer + width*height + width*height/4;
+ return encodeH264(in, &avFrame, pts);
}
bool PL_H264Encoder::pay(const PipeMaterial& pm)
{
- PL_H264Encoder_Internal* in = (PL_H264Encoder_Internal*)internal;
+ H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
- //in->buffer readly
+ in->payError = true;
+
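+	// lazy initialization: the encoder is opened on the first payload so the codec
+	// width/height can be taken from the incoming frame when not set in the config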
+ if (!in->ffmpegInited)
+ {
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+ if (frame != nullptr && frame->buffer != nullptr &&
+ (in->config.bytesBufferImageWidth == 0 || in->config.bytesBufferImageHeight == 0))
+ {
+ if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
+ {
+ AVFrame* pAVFrame = (AVFrame*)frame->buffer;
+ if (pAVFrame != nullptr)
+ {
+ in->config.bytesBufferImageWidth = pAVFrame->width;
+ in->config.bytesBufferImageHeight = pAVFrame->height;
+ LOGP(NOTICE, "Set codec size from AVFrame width=%d, height=%d",
+ in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+ }
+ }
+ else if (frame->type == MB_Frame::MBFT_YUV420)
+ {
+ in->config.bytesBufferImageWidth = frame->width;
+ in->config.bytesBufferImageHeight = frame->height;
+ LOGP(NOTICE, "Set codec size from frame width=%d, height=%d",
+ in->config.bytesBufferImageWidth, in->config.bytesBufferImageHeight);
+ }
+ }
+
+ bool ret = initH264EncoderEnv(in);
+ if (!ret)
+ {
+ LOG_ERROR << "initH264EncoderEnv error" << std::endl;
+ return false;
+ }
+ else
+ in->ffmpegInited = true;
+ }
+
+ if (pm.type != PipeMaterial::PMT_FRAME)
+ {
+ LOG_ERROR << "Only support PMT_FRAME" << std::endl;
+ return false;
+ }
+
+ if (pm.buffer == nullptr)
+ return false;
+
+ MB_Frame* frame = (MB_Frame*)pm.buffer;
+
+ bool ret;
+
+ if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
+ ret = encodeH264(in, (AVFrame*)(frame->buffer), frame->pts);
+ else if (frame->type == MB_Frame::MBFT_YUV420)
+ ret = encodeH264(in, (uint8_t*)(frame->buffer), frame->pts);
+ else
+ {
+ LOG_ERROR << "Only support MBFT_PTR_AVFRAME / MBFT_YUV420" << std::endl;
+ in->payError = true;
+ return false;
+ }
- //static size_t f=0;
- //char fname[50];
- //sprintf(fname, "%u.bgra", ++f);
- //FILE * pFile = fopen (fname,"wb");
- //fwrite (in->buffer , sizeof(char), in->buffSize, pFile);
- //fclose(pFile);
+ in->payError = !ret;
+
+ if (ret)
+ {
+ in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
+ in->lastFrame.buffer = in->buffer;
+ in->lastFrame.buffSize = in->buffSize;
+ in->lastFrame.width = frame->width;
+ in->lastFrame.height = frame->height;
- return true;
+ if (in->config.resetPTS)
+			gettimeofday(&(in->lastFrame.pts), NULL);
+ else
+ in->lastFrame.pts = frame->pts;
+ }
+
+ return ret;
}
bool PL_H264Encoder::gain(PipeMaterial& pm)
{
- PL_H264Encoder_Internal* in = (PL_H264Encoder_Internal*)internal;
+ H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
- pm.buffer = in->buffer;
- pm.buffSize = in->buffSize;
+ if (!in->payError)
+ {
+ pm.type = PipeMaterial::PMT_FRAME;
+ pm.buffer = &(in->lastFrame);
+ pm.buffSize = 0;
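+		// the encoded NALU is handed out via lastFrame; pm.buffSize stays 0 because
+		// the payload size is carried in lastFrame.buffSize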
+ }
pm.former = this;
- return true;
+ return !in->payError;
}
--
Gitblit v1.8.0