#include "FormatOut.hpp"

#include <cstdio>

#include <stdexcept>
#include <vector>

#include <unistd.h>
#include <sys/time.h>

extern "C"{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
}

#include "../log/log.hpp"
#include "../configure/conf.hpp"

#include "../property/VideoProp.hpp"
#include "../data/CodedData.hpp"
#include "../data/FrameData.hpp"

#include "../../common/gpu/info.h"

using namespace logif;
|
namespace ffwrapper{
|
// Default-construct with every member in a known empty state; the real
// setup happens in the parameterized constructors.
FormatOut::FormatOut()
    : ctx_(NULL),
      v_s_(NULL),
      enc_ctx_(NULL),
      sync_opts_(0),
      record_(false),
      fps_(0.0f),
      format_name_("mp4"),
      streams_(NULL)
{
}
|
|
// Release every owned FFmpeg resource on destruction.
FormatOut::~FormatOut()
{
    clear();
}
|
|
void FormatOut::clear(){
|
|
if(enc_ctx_){
|
avcodec_close(enc_ctx_);
|
}
|
if(ctx_){
|
closeResource();
|
|
avformat_free_context(ctx_);
|
ctx_ = NULL;
|
}
|
v_s_ = NULL;
|
sync_opts_ = 0;
|
|
}
|
|
// Build an output context for `filename` and open an H.264 encoder
// configured from `prop`. When GPU acceleration is requested, the first
// attempt uses NVENC; if that fails, the partially-built state is cleared,
// `prop` is downgraded to software (gpu_acc_ = false) and one retry is
// made with the CPU encoder. Throws std::runtime_error when no attempt
// succeeds.
FormatOut::FormatOut(VideoProp &prop,
    const char *filename, char *format_name/*=NULL*/)
:FormatOut(){

    bool flag = true;
    // at most two attempts: GPU first (if enabled), then CPU fallback
    for (int i = 0; i < 2; ++i)
    {
        flag = open(filename, format_name);
        if(flag){
            flag = openCodec(prop);
            if(!prop.gpuAccl()){
                // software path: there is no fallback, accept the result
                break;
            }
            if(!flag){
                // GPU encoder failed: drop to software and retry once
                prop.gpu_acc_ = false;
                clear();
            }else{
                break;
            }
        }
    }

    if(!flag){
        throw std::runtime_error("FormatOut Init Failed!");
    }

    fps_ = prop.fps_;
}
|
///////////////////////////////////////////////////////////////////////
|
bool FormatOut::open(const char *filename, const char *format_name){
|
|
const int ret = avformat_alloc_output_context2(&ctx_, NULL, format_name, filename);
|
if(ret < 0){
|
logIt("open %s failed:%s",filename,
|
getAVErrorDesc(ret).c_str());
|
|
return false;
|
}
|
|
return true;
|
}
|
|
// Fill enc_ctx_ with the H.264 encoding parameters derived from `prop`.
// Width/height are forced even (yuv420p requires it), the time base is
// 1/fps, and the rate-control window is derived from the target bitrate.
void FormatOut::configEncoder(VideoProp &prop){

    const int even_w = (prop.width_  & 0x01) ? prop.width_  - 1 : prop.width_;
    const int even_h = (prop.height_ & 0x01) ? prop.height_ - 1 : prop.height_;

    enc_ctx_->codec_id   = AV_CODEC_ID_H264;
    enc_ctx_->codec_type = AVMEDIA_TYPE_VIDEO;
    enc_ctx_->width      = even_w;
    enc_ctx_->height     = even_h;

    enc_ctx_->sample_aspect_ratio = prop.sample_aspect_ratio_;

    enc_ctx_->time_base.num = 1;
    enc_ctx_->time_base.den = prop.fps_;

    enc_ctx_->gop_size     = 12;
    enc_ctx_->pix_fmt      = AV_PIX_FMT_YUV420P;
    enc_ctx_->max_b_frames = 0;

    enc_ctx_->flags   |= AV_CODEC_FLAG_QSCALE;
    enc_ctx_->bit_rate = prop.bit_rate_ * 1000;
    if(!prop.gpuAccl()){
        // software encoding gets a slightly lower target bitrate
        enc_ctx_->bit_rate *= 0.75;
    }
    enc_ctx_->rc_min_rate    = enc_ctx_->bit_rate / 2;
    enc_ctx_->rc_max_rate    = enc_ctx_->bit_rate * 2 - enc_ctx_->rc_min_rate;
    enc_ctx_->rc_buffer_size = enc_ctx_->rc_max_rate * 10;
}
|
|
/**
 * Create the output video stream and open the H.264 encoder.
 *
 * Prefers the NVENC hardware encoder when prop.gpuAccl() is set, falling
 * back to the default software H.264 encoder. Returns false on any
 * failure (encoder missing, no idle GPU, open/parameter errors).
 *
 * Fixes vs. previous revision:
 *  - the "encoder not found" log dereferenced the NULL codec pointer;
 *  - avformat_new_stream()/avcodec_alloc_context3() results are checked;
 *  - AV_CODEC_FLAG_GLOBAL_HEADER is now set BEFORE avcodec_open2(), as
 *    required for the encoder to emit global extradata.
 */
bool FormatOut::openCodec(VideoProp &prop){

    AVOutputFormat *ofmt = ctx_->oformat;
    AVCodecID codec_id = AV_CODEC_ID_H264;
    AVCodec *codec = NULL;

    if(prop.gpuAccl()){
        codec = avcodec_find_encoder_by_name("h264_nvenc");
        if(!codec){
            logIt("no support nvenc\n");
            prop.gpu_acc_ = false;
        }
    }

    if(!prop.gpuAccl()){
        codec = avcodec_find_encoder(codec_id);
    }

    if(!codec){
        // do NOT touch codec->name here: codec is NULL
        logIt("can't find encoder for codec id %d", (int)codec_id);
        return false;
    }
    logIt("use encoder %s", codec->name);

    v_s_ = avformat_new_stream(ctx_, codec);
    if(!v_s_){
        logIt("avformat_new_stream failed");
        return false;
    }

    enc_ctx_ = avcodec_alloc_context3(codec);
    if(!enc_ctx_){
        logIt("avcodec_alloc_context3 failed");
        return false;
    }

    configEncoder(prop);

    // must be set before avcodec_open2() so the encoder produces global
    // extradata instead of in-band parameter sets
    if(ofmt->flags & AVFMT_GLOBALHEADER)
    {
        enc_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    if(prop.gpuAccl()){
        av_opt_set(enc_ctx_->priv_data, "preset", "llhp", 0);
        int idle_gpu = gpu::getGPU(120);
        if (prop.gpu_index_ > -1){
            // explicit GPU index overrides the idle-GPU probe
            idle_gpu = prop.gpu_index_;
        }
        if(idle_gpu < 0){
            logIt("NO GPU RESOURCE TO ENCODE");
            return false;
        }
        av_opt_set_int(enc_ctx_->priv_data, "gpu", idle_gpu, 0);
        printf("ENCODER USE GPU %d\n", idle_gpu);
    }else{
        av_opt_set(enc_ctx_->priv_data, "preset", prop.preset().c_str(), 0);
    }
    av_opt_set(enc_ctx_->priv_data, "tune", "zerolatency", 0);
    av_opt_set(enc_ctx_->priv_data, "profile", "baseline", 0);

    int err = avcodec_open2(enc_ctx_, codec, NULL);
    if(err < 0)
    {
        logIt("can't open output codec: %s", getAVErrorDesc(err).c_str());
        return false;
    }
    err = avcodec_parameters_from_context(v_s_->codecpar, enc_ctx_);
    if (err < 0) {
        logIt("can't avcodec_parameters_from_context: %s", getAVErrorDesc(err).c_str());
        return false;
    }
    ofmt->video_codec = codec_id;

    return true;
}
|
|
// Read-only access to the encoder context (NULL until openCodec() ran).
const AVCodecContext *FormatOut::getCodecContext()const{
    return this->enc_ctx_;
}
|
|
// Push `frame` into the encoder and try to pull one encoded packet out.
// Returns 1 when `pkt` holds a packet (timestamps rescaled to the output
// stream's time base), 0 when the encoder needs more input or is drained,
// -1 on error.
int FormatOut::encode(AVPacket &pkt, AVFrame *frame){

    AVStream *out = getStream();

    frame->quality   = enc_ctx_->global_quality;
    frame->pict_type = AV_PICTURE_TYPE_NONE;

    pkt.data = NULL;
    pkt.size = 0;

    int ret = avcodec_send_frame(enc_ctx_, frame);
    if(ret < 0){
        logIt("avcodec_send_frame failed : %s", getAVErrorDesc(ret).c_str());
        return -1;
    }

    for(;;){
        ret = avcodec_receive_packet(enc_ctx_, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // no packet available yet (or encoder fully drained)
            return 0;
        }
        if (ret < 0) {
            logIt("avcodec_receive_packet : %s", getAVErrorDesc(ret).c_str());
            return -1;
        }
        // synthesize a pts for encoders that never delay their output
        if(pkt.pts == AV_NOPTS_VALUE
           && !(enc_ctx_->codec->capabilities & AV_CODEC_CAP_DELAY))
        {
            pkt.pts = sync_opts_++;
        }
        av_packet_rescale_ts(&pkt, enc_ctx_->time_base, out->time_base);
        return 1;
    }
}
|
|
// Encode the frame wrapped in `frame_data`, storing the result in `data`.
// The stream's codec extradata is referenced into `data` first so
// consumers can prepend the codec headers.
int FormatOut::encode(std::shared_ptr<CodedData> &data,
                      std::shared_ptr<FrameData> &frame_data){

    AVCodecContext *enc = getStream()->codec;
    data->refExtraData(enc->extradata, enc->extradata_size);

    return encode(data->getAVPacket(), frame_data->getAVFrame());
}
|
|
// Encode a raw AVFrame, storing the result in `data` (extradata attached
// the same way as in the FrameData overload).
int FormatOut::encode(std::shared_ptr<CodedData> &data,AVFrame *frame){

    AVCodecContext *enc = getStream()->codec;
    data->refExtraData(enc->extradata, enc->extradata_size);

    return encode(data->getAVPacket(), frame);
}
|
|
//////////////////////////////////////////////////////////////////////////
|
// Lightweight constructor for the remux/writer path: record the target
// container format and frame rate; no FFmpeg contexts are created yet.
FormatOut::FormatOut(const double fps, const char *format_name)
:FormatOut(){

    format_name_ = format_name;
    fps_         = fps;
}
|
|
bool FormatOut::openResource(const char *filename, const int flags){
|
if((ctx_->oformat->flags & AVFMT_NOFILE) != AVFMT_NOFILE){
|
|
const int err = avio_open2(&ctx_->pb, filename, flags, NULL, NULL);
|
if(err < 0)
|
{
|
logIt("can't save to %s error:%s",filename,
|
getAVErrorDesc(err).c_str());
|
|
return false;
|
}
|
strcpy(&ctx_->filename[0], filename);
|
}
|
return true;
|
}
|
bool FormatOut::closeResource(){
|
if(record_){
|
return avio_close(ctx_->pb) == 0;
|
}
|
return true;
|
}
|
|
// Mirror every input stream onto the output context (legacy
// avcodec_copy_context() path), remember the video stream in v_s_ and
// keep `in` for later PTS rescaling. Returns false on any failure.
bool FormatOut::copyCodecFromIn(std::vector<AVStream*> in){

    for (AVStream *in_stream : in)
    {
        // create one output stream per input stream
        AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
        if(!out_stream)
        {
            logIt("Failed allocating output stream.\n");
            return false;
        }

        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
            v_s_ = out_stream;
        }
        // copy the codec parameters from the input to the output stream
        const int ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if(ret < 0)
        {
            logIt("Failed to copy context from input to output stream codec context\n");
            return false;
        }
        out_stream->codec->codec_tag = 0;

        if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    streams_ = in;

    return true;
}
|
|
/**
 * Set up a pass-through (remux) writer: create an output context matching
 * the given input streams, open `filename` for writing and emit the
 * container header with streaming-friendly mp4 flags.
 *
 * The magic literal 2 previously passed to openResource() is replaced by
 * the constant it stood for, AVIO_FLAG_WRITE.
 */
bool FormatOut::JustWriter(std::vector<AVStream*> in, const char *filename){
    // drop any previous session before starting a new one
    if(ctx_){
        clear();
    }

    bool flag = open(NULL, format_name_.c_str());

    flag = copyCodecFromIn(in) && flag;
    if(!flag){
        logIt("FormatOut JustWriter error from in");
        return false;
    }

    flag = openResource(filename, AVIO_FLAG_WRITE);
    if(flag){
        // fragmented mp4 so the file stays playable without a final trailer
        AVDictionary *avdic = NULL;
        char option_key[]="movflags";
        char option_value[]="frag_keyframe+empty_moov";
        av_dict_set(&avdic,option_key,option_value,0);
        flag = writeHeader(&avdic);
        av_dict_free(&avdic);
    }
    return flag;
}
|
|
bool FormatOut::EncodeWriter(const char *filename){
|
|
auto flag = openResource(filename, 2);
|
if(flag){
|
AVDictionary *avdic = NULL;
|
char option_key[]="movflags";
|
char option_value[]="frag_keyframe+empty_moov";
|
av_dict_set(&avdic,option_key,option_value,0);
|
|
flag = writeHeader(&avdic);
|
av_dict_free(&avdic);
|
}
|
return false;
|
}
|
|
bool FormatOut::endWriter(){
|
|
auto flag = writeTrailer();
|
closeResource();
|
record_ = false;
|
|
return flag;
|
}
|
|
const char* FormatOut::getFileName() const{
|
return ctx_->filename;
|
}
|
//////////////////////////////////////////////////////////////////////////////////////////////////
|
// Write the container header with optional muxer options; flips record_
// so closeResource()/endWriter() know a session is active.
bool FormatOut::writeHeader(AVDictionary **options/* = NULL*/){

    const int err = avformat_write_header(ctx_, options);
    if(err < 0){
        logIt("write header to file failed:%s",
              getAVErrorDesc(err).c_str());
        return false;
    }
    record_ = true;
    return true;
}
|
|
// Stamp a packet of the single (video-only) output stream: pts/dts and
// duration are derived from the frame counter at the configured fps and
// expressed in the stream's time base.
void FormatOut::adjustVideoPTS(AVPacket &pkt, const int64_t &frame_cnt){

    pkt.pos = -1;
    pkt.stream_index = 0;

    const AVRational stream_tb   = getStream()->time_base;
    const AVRational internal_tb = { 1, AV_TIME_BASE };

    // duration of one frame in FFmpeg's internal (microsecond) time base
    const int64_t frame_us = (int64_t)(AV_TIME_BASE / fps_);

    pkt.pts      = av_rescale_q(frame_cnt * frame_us, internal_tb, stream_tb);
    pkt.dts      = pkt.pts;
    pkt.duration = av_rescale_q(frame_us, internal_tb, stream_tb);
}
|
|
/**
 * Rescale a packet's timestamps before writing.
 *
 * Single-stream (encoder) outputs are stamped from the frame counter via
 * adjustVideoPTS(); multi-stream (remux) outputs have their pts/dts/
 * duration rescaled from the remembered input stream's time base to the
 * output stream's.
 *
 * Fixes vs. previous revision: the stream-index bound check no longer
 * mixes signed and unsigned comparison, and dead commented-out code plus
 * an unused local were removed.
 */
void FormatOut::adjustPTS(AVPacket &pkt, const int64_t &frame_cnt){
    if (streams_.size() == 1){
        // encoder path: one video stream, timestamps come from frame_cnt
        return adjustVideoPTS(pkt, frame_cnt);
    }

    if (pkt.stream_index < 0 ||
        (size_t)pkt.stream_index >= streams_.size()){
        logIt("adjustPTS pkt stream index too much");
        return;
    }

    AVStream *in_stream  = streams_[pkt.stream_index];
    AVStream *out_stream = ctx_->streams[pkt.stream_index];

    // rescale PTS/DTS/duration from the input to the output time base
    pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                               (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                               (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
    pkt.pos = -1;
}
|
|
/**
 * Stamp and write one packet. Returns false when the muxer rejected it;
 * the failure is logged with the packet's timestamps.
 *
 * BUG FIX: the log format used %d for the int64_t frame counter (and
 * relied on int64_t matching %lld), which is undefined behaviour in a
 * varargs call; all 64-bit values are now cast explicitly for %lld.
 */
bool FormatOut::writeFrame(AVPacket &pkt, const int64_t &frame_cnt,
                           bool interleaved/* = true*/){

    adjustPTS(pkt, frame_cnt);
    const bool ok = writeFrame2(pkt, interleaved);
    if (!ok){
        logIt("write to file failed, pkt.pts: %lld, dts: %lld, frame count: %lld",
              (long long)pkt.pts, (long long)pkt.dts, (long long)frame_cnt);
    }
    return ok;
}
|
|
// Hand one packet to the muxer; `interleaved` selects between
// av_interleaved_write_frame() and av_write_frame().
// Returns true when the write is treated as successful.
bool FormatOut::writeFrame2(AVPacket &pkt, bool interleaved){

    int ret = 0;
    if(interleaved){
        ret = av_interleaved_write_frame(ctx_, &pkt);
    }else{
        // returns 1 if flushed and there is no more data to flush
        ret = av_write_frame(ctx_, &pkt);
    }

    // NOTE(review): this success condition looks suspicious — it treats
    // ret == 0 and any error code strictly below -22 (AVERROR(EINVAL))
    // as success, while -22 itself and errors in (-22, 0) report failure.
    // Presumably the intent was to tolerate -22 timestamp-order errors
    // (late packets are dropped, not fatal) — confirm before changing.
    if(ret < -22 || ret == 0){
        return true;
    }

    return false;
}
|
|
bool FormatOut::writeTrailer(){
|
const int ret = av_write_trailer(ctx_);
|
if(ret != 0)
|
{
|
logIt("write trailer to file failed:%s",
|
getAVErrorDesc(ret).c_str());
|
return false;
|
}
|
|
return true;
|
}
|
|
}
|