#include "PL_H264Encoder.h"

#include "MaterialBuffer.h"

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

#include <libyuv.h>
}
|
|
struct H264Encoder_Internal
|
{
|
uint8_t buffer[1920*1080*3];
|
size_t buffSize;
|
size_t buffSizeMax;
|
bool payError;
|
bool ffmpegInited;
|
size_t frameCount;
|
MB_Frame lastFrame;
|
|
AVCodecContext* pAVCodecContext;
|
AVFrame* pAVFrame;//#todo delete
|
AVStream* pAVStream;
|
AVFormatContext* pAVFormatContext;
|
|
H264Encoder_Internal() :
|
buffSize(0), buffSizeMax(sizeof(buffer)),
|
payError(true), ffmpegInited(false), frameCount(0),
|
pAVCodecContext(nullptr), pAVFrame(nullptr), pAVStream(nullptr), pAVFormatContext(nullptr),
|
lastFrame()
|
{
|
}
|
|
~H264Encoder_Internal()
|
{
|
}
|
|
void reset()
|
{
|
buffSize = 0;
|
payError = true;
|
ffmpegInited = false;
|
frameCount = 0;
|
|
MB_Frame _lastFrame;
|
lastFrame = _lastFrame;
|
|
pAVCodecContext = nullptr;
|
pAVFrame = nullptr;
|
pAVStream = nullptr;
|
pAVFormatContext = nullptr;
|
}
|
};
|
|
// Factory entry point used by the pipeline registry.
PipeLineElem* create_PL_H264Encoder()
{
    return new PL_H264Encoder();
}
|
|
// Allocate the opaque per-instance state.
PL_H264Encoder::PL_H264Encoder()
{
    internal = new H264Encoder_Internal;
}
|
|
// Release the opaque state.
// NOTE(review): does not call finit(); the owner is expected to finit()
// before destruction -- confirm against the pipeline's lifecycle contract.
PL_H264Encoder::~PL_H264Encoder()
{
    H264Encoder_Internal* self = (H264Encoder_Internal*)internal;
    internal = nullptr;
    delete self;
}
|
|
bool PL_H264Encoder::init(void* args)
|
{
|
H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
|
in->reset();
|
|
return true;
|
}
|
|
void PL_H264Encoder::finit()
|
{
|
H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
|
|
}
|
|
// Create and open the H264 encoder context plus the reusable source frame.
// Returns false on failure, releasing anything partially created so the
// caller may retry. (Previously: no null-check after avcodec_alloc_context3
// and every failure path leaked the context and/or the frame.)
bool initH264EncoderEnv(H264Encoder_Internal* in)
{
    av_register_all();

    // find the video encoder
    AVCodec* avCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!avCodec)
    {
        printf("codec not found!\n");
        return false;
    }

    in->pAVCodecContext = avcodec_alloc_context3(avCodec);
    if (in->pAVCodecContext == nullptr)
    {
        printf("avcodec_alloc_context3 error\n");
        return false;
    }

    in->pAVCodecContext->bit_rate = 1*1024*1024*8; // 8 Mbit/s (comment said "3MB" -- the value is 8388608 bps)
    in->pAVCodecContext->width = 800;//#todo test
    in->pAVCodecContext->height = 600;//#todo from config
    in->pAVCodecContext->time_base.num = 1;
    in->pAVCodecContext->time_base.den = 25;   // 25 fps
    in->pAVCodecContext->gop_size = 25;        // one keyframe per second
    in->pAVCodecContext->max_b_frames = 0;     // no B-frames: low latency
    //in->pAVCodecContext->profile = FF_PROFILE_H264_MAIN;
    in->pAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    av_opt_set(in->pAVCodecContext->priv_data, "preset", "superfast", 0);
    //av_opt_set(in->pAVCodecContext->priv_data, "tune", "zerolatency", 0);

    if (avcodec_open2(in->pAVCodecContext, avCodec, NULL) < 0)
    {
        printf("avcodec_open2 error\n");
        av_free(in->pAVCodecContext);
        in->pAVCodecContext = nullptr;
        return false;
    }

    in->pAVFrame = av_frame_alloc(); // Allocate video frame
    if (in->pAVFrame == nullptr)
    {
        printf("av_frame_alloc error\n");
        avcodec_close(in->pAVCodecContext);
        av_free(in->pAVCodecContext);
        in->pAVCodecContext = nullptr;
        return false;
    }

    in->pAVFrame->format = in->pAVCodecContext->pix_fmt;
    in->pAVFrame->width  = in->pAVCodecContext->width;
    in->pAVFrame->height = in->pAVCodecContext->height;

    // align=16 keeps the planes usable by SIMD code paths.
    int ret = av_image_alloc(in->pAVFrame->data, in->pAVFrame->linesize,
                             in->pAVCodecContext->width, in->pAVCodecContext->height,
                             in->pAVCodecContext->pix_fmt, 16);
    if (ret < 0)
    {
        printf("av_image_alloc error\n");
        av_frame_free(&(in->pAVFrame));
        avcodec_close(in->pAVCodecContext);
        av_free(in->pAVCodecContext);
        in->pAVCodecContext = nullptr;
        return false;
    }

    // Muxer setup (pAVFormatContext / pAVStream) intentionally disabled:
    // this element emits raw H264 NALUs, not a container stream.

    return true;
}
|
|
// Rounded-up integer division: dimension of a chroma plane after
// subsampling by factor 'a' (e.g. width/2 for YUV420, rounded up).
#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))

// Scale the YUV420 planes of src into dest using libyuv::I420Scale.
//
// NOTE(review): plane strides are passed as the frame WIDTH, not as
// AVFrame::linesize. This assumes both frames' planes are tightly packed
// (stride == width): true for the 800x600 destination allocated with
// 16-byte alignment in initH264EncoderEnv (800 and 400 are multiples of 16)
// and for the packed buffer wrapped by encodeH264(uint8_t*), but it would
// break for widths where the aligned linesize differs from the width --
// TODO confirm before changing frame dimensions.
void copyAVFrame(AVFrame* dest, AVFrame* src)
{
    int src_width = src->width;
    int src_height = src->height;
    int dst_width = dest->width;
    int dst_height = dest->height;
    printf("I420Scale sw=%d, sh=%d, dw=%d, dh=%d\n", src_width,src_height,dst_width, dst_height);

    // kFilterNone = nearest-neighbour, the cheapest scaling filter.
    libyuv::I420Scale(src->data[0], src_width,
                      src->data[1], SUBSAMPLE(src_width, 2),
                      src->data[2], SUBSAMPLE(src_width, 2),
                      src_width, src_height,
                      dest->data[0], dst_width,
                      dest->data[1], SUBSAMPLE(dst_width, 2),
                      dest->data[2], SUBSAMPLE(dst_width, 2),
                      dst_width, dst_height,
                      libyuv::kFilterNone );

    // Debug dump of the scaled planes, kept for reference:
    //#test
    //static size_t f=0;
    //char fname[50];
    //sprintf(fname, "%u.yuv420", ++f);
    //FILE * pFile = fopen (fname,"wb");
    //fwrite (dest->data[0] , sizeof(char), dst_width * dst_height, pFile);
    //fwrite (dest->data[1] , sizeof(char), dst_width * dst_height / 4, pFile);
    //fwrite (dest->data[2] , sizeof(char), dst_width * dst_height / 4, pFile);
    //fclose(pFile);

    // Earlier copy strategies, kept for reference:
    //dest->data[0] = src->data[0];
    //dest->data[1] = src->data[1];
    //dest->data[2] = src->data[2];

    //int height = dest->height;
    //int width = dest->width;
    //
    //memcpy(dest->data[0], src->data[0], height * width); // Y
    //memcpy(dest->data[1], src->data[1], height * width / 4); // U
    //memcpy(dest->data[2], src->data[2], height * width / 4); // V
}
|
|
// Encode one frame into in->buffer.
// Returns true when an encoded packet was produced (in->buffSize > 0),
// false on encoder error or when the encoder buffered the frame.
// Fixes: printf'ing the size_t frameCount with %d was undefined behavior
// (now %zu), and the memcpy into the staging buffer is now bounds-checked.
bool encodeH264(H264Encoder_Internal* in, AVFrame* pAVFrame, timeval pts)
{
    in->buffSize = 0;

    // Scale the caller's frame into the encoder-owned 800x600 frame.
    copyAVFrame(in->pAVFrame, pAVFrame);

    // PTS derived from the capture timestamp; 90000 is the standard
    // 90 kHz video clock rate (earlier experiments kept below for reference).
    //in->pAVFrame->pts = (1.0 / 25) * 90000 * in->frameCount;
    //in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000 + ((1.0 / 25) * 90000 * in->frameCount);
    in->pAVFrame->pts = (pts.tv_sec * 1000 * 1000 + pts.tv_usec) / 90000;

    AVPacket pAVPacket = {0};
    av_init_packet(&pAVPacket);

    // encode the image
    int gotPacket = 0;
    int ret = avcodec_encode_video2(in->pAVCodecContext, &pAVPacket, in->pAVFrame, &gotPacket);
    if (ret < 0)
    {
        printf("avcodec_encode_video2 (1) error=%d\n", ret);
        return false;
    }

    if (gotPacket > 0)
    {
        in->frameCount++;
        printf("Succeed to encode (1) frame=%zu, size=%d\n", in->frameCount, pAVPacket.size);

        // Never overrun the staging buffer, however large the packet is.
        if (pAVPacket.size > 0 && (size_t)pAVPacket.size <= in->buffSizeMax)
        {
            memcpy(in->buffer, pAVPacket.data, pAVPacket.size);
            in->buffSize = pAVPacket.size;
        }
        else
            printf("encodeH264 packet dropped, size=%d exceeds buffer\n", pAVPacket.size);

        av_free_packet(&pAVPacket);
    }

    //#todo finit: on shutdown the encoder should be flushed by calling
    // avcodec_encode_video2 with a NULL frame until gotPacket == 0, to
    // drain any delayed packets.

    in->payError = (in->buffSize == 0);
    return !(in->payError);
}
|
|
// Wrap a packed YUV420 buffer (hard-coded 1920x1080 -- #todo from config)
// in a temporary AVFrame and encode it.
// Fix: the stack AVFrame is now zero-initialized; previously its other
// fields (linesize, format, ...) were indeterminate, which is UB to read
// and fragile if downstream code ever inspects them.
bool encodeH264(H264Encoder_Internal* in, uint8_t* buffer, timeval pts)
{
    AVFrame avFrame = {};
    avFrame.width = 1920;//#todo
    avFrame.height = 1080;
    avFrame.data[0] = buffer;                           // Y plane
    avFrame.data[1] = buffer + 1920*1080;               // U plane (w*h/4 bytes)
    avFrame.data[2] = buffer + 1920*1080 + 1920*1080/4; // V plane
    return encodeH264(in, &avFrame, pts);
}
|
|
bool PL_H264Encoder::pay(const PipeMaterial& pm)
|
{
|
H264Encoder_Internal* in = (H264Encoder_Internal*)internal;
|
|
in->payError = true;
|
|
if (!in->ffmpegInited)
|
{
|
bool ret = initH264EncoderEnv(in);
|
if (!ret)
|
{
|
printf("initH264EncoderEnv error\n");
|
return false;
|
}
|
else
|
in->ffmpegInited = true;
|
}
|
|
if (pm.type != PipeMaterial::PMT_FRAME)
|
{
|
printf("PL_H264Encoder::pay only support PMT_FRAME\n");
|
return false;
|
}
|
|
if (pm.buffer == nullptr)
|
return false;
|
|
MB_Frame* frame = (MB_Frame*)pm.buffer;
|
|
bool ret;
|
|
if (frame->type == MB_Frame::MBFT_PTR_AVFRAME)
|
ret = encodeH264(in, (AVFrame*)(frame->buffer), frame->pts);
|
else if (frame->type == MB_Frame::MBFT_YUV420)
|
ret = encodeH264(in, (uint8_t*)(frame->buffer), frame->pts);
|
else
|
{
|
printf("PL_H264Encoder::pay only support MBFT_PTR_AVFRAME / MBFT_YUV420\n");
|
in->payError = true;
|
return false;
|
}
|
|
in->payError = !ret;
|
|
if (ret)
|
{
|
in->lastFrame.type = MB_Frame::MBFT_H264_NALU;
|
in->lastFrame.buffer = in->buffer;
|
in->lastFrame.buffSize = in->buffSize;
|
in->lastFrame.width = frame->width;
|
in->lastFrame.height = frame->height;
|
in->lastFrame.pts = frame->pts;
|
}
|
|
return ret;
|
}
|
|
// Hand the most recently encoded NALU to the next pipeline element.
// pm.buffer points at the MB_Frame descriptor (not the raw bytes), so
// pm.buffSize is deliberately left at 0.
bool PL_H264Encoder::gain(PipeMaterial& pm)
{
    H264Encoder_Internal* self = (H264Encoder_Internal*)internal;

    const bool hasFrame = !self->payError;
    if (hasFrame)
    {
        pm.type = PipeMaterial::PMT_FRAME;
        pm.buffer = &(self->lastFrame);
        pm.buffSize = 0;
    }
    pm.former = this;
    return hasFrame;
}
|