From a8b447be656145c9ba2a2d8319a10ae8f726de1f Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Fri, 09 Oct 2020 18:20:46 +0800
Subject: [PATCH] h264 mp4

---
 csrc/ffmpeg/format/FormatOut.cpp | 170 +++++++++++++++++++++++++++-----------------------------
 1 files changed, 83 insertions(+), 87 deletions(-)

diff --git a/csrc/ffmpeg/format/FormatOut.cpp b/csrc/ffmpeg/format/FormatOut.cpp
index 3dabfc8..4d6ce60 100644
--- a/csrc/ffmpeg/format/FormatOut.cpp
+++ b/csrc/ffmpeg/format/FormatOut.cpp
@@ -31,11 +31,14 @@
     ,record_(false)
     ,fps_(0.0f)
     ,format_name_("mp4")
+    ,in_v_stream_(NULL)
+    ,in_a_stream_(NULL)
 {}
 
 FormatOut::~FormatOut()
 {
     clear();
+
 }
 
 void FormatOut::clear(){
@@ -275,61 +278,63 @@
     return true;
 }
 
-bool FormatOut::copyCodecFromIn(std::vector<AVStream*> in){
-
-    for (int i = 0; i < in.size(); i++){
-
-        AVStream *in_stream = in[i];
-
-        AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
-        if(!out_stream)
-        {
-            logIt("Failed allocating output stream.\n");
-            return false;
-        }
-
-        // copy the codec context from the input stream to the output stream
-        auto ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
-        if(ret<0)
-        {
-            logIt("Failed to copy context from input to output stream codec context\n");
-            return false;
-        }
-
-        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
-            v_idx_ = i;
-            logIt("copy video from instream");
-
-            out_stream->codecpar->codec_tag = out_stream->codec->codec_tag = 0;
-
-            if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
-                out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-        }
-        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){
-            logIt("copy audio from instream");
-            a_idx_ = i;
-
-            out_stream->codecpar->codec_tag = out_stream->codec->codec_tag = 0;
-
-            if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
-                out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-
-        }
+bool FormatOut::addStream(AVStream *s){
+    AVStream *in_stream = s;
+
+    AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
+    if(!out_stream)
+    {
+        logIt("Failed allocating output stream.\n");
+        return false;
     }
-
-    in_streams_ = in;
-
+    // copy the codec parameters from the input stream to the output stream
+    auto ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
+    if(ret<0)
+    {
+        logIt("Failed to copy context from input to output stream codec context\n");
+        return false;
+    }
+    out_stream->codecpar->codec_tag = 0;
+
+    if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
+        out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
     return true;
 }
 
-bool FormatOut::JustWriter(std::vector<AVStream*> in, const char *filename){
+bool FormatOut::copyCodecFromIn(AVStream *v, AVStream *a){
+    if (v){
+        v_idx_ = 0;
+        in_v_stream_ = v;
+
+        if (!addStream(v)){
+            logIt("format out add video stream error");
+            return false;
+        }else{
+            logIt("copy video from instream");
+        }
+    }
+    if (a){
+        a_idx_ = 1;
+        in_a_stream_ = a;
+        if (!addStream(a)){
+            logIt("format out add audio stream error");
+            return false;
+        }else{
+            logIt("copy audio from instream");
+        }
+    }
+    return true;
+}
+
+bool FormatOut::JustWriter(AVStream *v, AVStream *a, const char *filename){
     if(ctx_){
         clear();
     }
     bool flag = open(NULL, format_name_.c_str());
-    flag = copyCodecFromIn(in) && flag;
+    flag = copyCodecFromIn(v, a) && flag;
     if(!flag){
         logIt("FormatOut JustWriter error from in");
         return false;
@@ -345,7 +350,7 @@
         flag = writeHeader(&avdic);
         av_dict_free(&avdic);
     }
-
+
     return flag;
 }
 
@@ -391,34 +396,11 @@
 }
 
 void FormatOut::adjustPTS(AVPacket *pkt, const int64_t &frame_cnt){
-    if (pkt->stream_index >= ctx_->nb_streams){
-        logIt("adjustPTS pkt stream index too much");
-        return;
-    }
-
-    if (pkt->pts == AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE){
-        int64_t time_stamp = frame_cnt;
-
-        pkt->pos = -1;
-        pkt->stream_index = 0;
-        //Write PTS
-        AVRational time_base = getStream()->time_base;
-
-        AVRational time_base_q = { 1, AV_TIME_BASE };
-        //Duration between 2 frames (us)
-        // int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / fps_); // internal timestamp
-        int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_); // internal timestamp
-        //Parameters
-        pkt->pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base);
-        pkt->dts = pkt->pts;
-        pkt->duration = av_rescale_q(calc_duration, time_base_q, time_base);
-        return;
-    }
-
     AVStream *in_stream,*out_stream;
     int out_idx = -1;
-    for (auto i : in_streams_){
-        if (i->index == pkt->stream_index){
+    std::vector<AVStream*> in_streams{in_v_stream_, in_a_stream_};
+    for (auto i : in_streams){
+        if (i && (i->index == pkt->stream_index)){
             if (i->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
                 out_idx = v_idx_;
                 in_stream = i;
@@ -431,31 +413,41 @@
         }
     }
     if (out_idx == -1) return;
-    out_stream = ctx_->streams[out_idx];
     pkt->stream_index = out_idx;
+
+    int64_t time_stamp = frame_cnt;
+
+    if (out_idx == v_idx_){
+        pkt->pos = -1;
+        AVRational time_base = ctx_->streams[out_idx]->time_base;
+
+        AVRational time_base_q = { 1, AV_TIME_BASE };
+        int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_); // internal timestamp
+        pkt->pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base);
+        pkt->dts = pkt->pts;
+        pkt->duration = av_rescale_q(calc_duration, time_base_q, time_base);
+
+    }else if (out_idx == a_idx_){
+
+        pkt->duration = 1024;
+        pkt->pts = pkt->dts = pkt->duration * time_stamp;
+
+    }
 
     // logIt("BEFORE in stream timebase %d:%d, out timebase %d:%d,
     //        pts: %lld, dts: %lld, duration: %lld",
     //        in_stream->time_base.num, in_stream->time_base.den,
     //        out_stream->time_base.num, out_stream->time_base.den,
     //        pkt->pts, pkt->dts, pkt->duration);
 
-    // rescale PTS/DTS from the input time base to the output time base
-    pkt->pts = av_rescale_q_rnd(pkt->pts,in_stream->time_base,out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
-    pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
-    pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
-    pkt->pos = -1;
-
-    // logIt("AFTER stream %d, pts: %lld, dts: %lld, duration: %lld",
-    //        pkt->stream_index, pkt->pts, pkt->dts, pkt->duration);
 }
 
 bool FormatOut::writeFrame(AVPacket *pkt, const int64_t &frame_cnt,
                            bool interleaved/* = true*/){
     adjustPTS(pkt, frame_cnt);
-    auto ret = writeFrame2(pkt, interleaved);
+    auto ret = writeFrameInternal(pkt, interleaved);
     if (!ret){
         logIt("write to file failed, pkt.pts: %lld, dts: %lld, frame count: %d",
               pkt->pts, pkt->dts, frame_cnt);
@@ -463,16 +455,15 @@
     return ret;
 }
 
-bool FormatOut::writeFrame2(AVPacket *pkt, bool interleaved){
-
+static bool write_frame(AVFormatContext *ctx, AVPacket *pkt, bool interleaved){
     int ret = 0;
     if(interleaved){
-        ret = av_interleaved_write_frame(ctx_, pkt);
+        ret = av_interleaved_write_frame(ctx, pkt);
     }else{
        // returns 1 if flushed and there is no more data to flush
-        ret = av_write_frame(ctx_, pkt);
+        ret = av_write_frame(ctx, pkt);
     }
-
+
     if(ret < -22 || ret == 0){
         return true;
     }
@@ -480,6 +471,11 @@
     return false;
 }
 
+bool FormatOut::writeFrameInternal(AVPacket *pkt, bool interleaved){
+
+    return write_frame(ctx_, pkt, interleaved);
+}
+
 bool FormatOut::writeTrailer(){
     const int ret = av_write_trailer(ctx_);
     if(ret != 0)
-- 
Gitblit v1.8.0
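
Note (not part of the patch): a minimal caller sketch for the reworked interface, showing how JustWriter(), writeFrame() and writeTrailer() fit together after this change. Only those three calls come from the patch; the demuxer setup, the header name "FormatOut.hpp", the function name remux_to_mp4, the default-constructed FormatOut and the per-stream frame counters are assumptions for illustration.

    // Hypothetical usage sketch. Assumes FormatOut.hpp declares the members
    // touched by this patch -- JustWriter(AVStream*, AVStream*, const char*),
    // writeFrame(AVPacket*, const int64_t&, bool /*= true*/), writeTrailer() --
    // and that a default-constructed FormatOut with fps_ set elsewhere is valid.
    extern "C" {
    #include <libavformat/avformat.h>
    }
    #include "FormatOut.hpp"  // assumed header name

    bool remux_to_mp4(const char *in_url, const char *out_file){
        AVFormatContext *in_ctx = NULL;
        if (avformat_open_input(&in_ctx, in_url, NULL, NULL) < 0) return false;
        if (avformat_find_stream_info(in_ctx, NULL) < 0){
            avformat_close_input(&in_ctx);
            return false;
        }

        // Pick the first video and audio streams; JustWriter now takes them
        // directly instead of a std::vector<AVStream*>.
        AVStream *v = NULL, *a = NULL;
        for (unsigned i = 0; i < in_ctx->nb_streams; i++){
            enum AVMediaType t = in_ctx->streams[i]->codecpar->codec_type;
            if (t == AVMEDIA_TYPE_VIDEO && !v) v = in_ctx->streams[i];
            if (t == AVMEDIA_TYPE_AUDIO && !a) a = in_ctx->streams[i];
        }

        FormatOut out;
        if (!out.JustWriter(v, a, out_file)){
            avformat_close_input(&in_ctx);
            return false;
        }

        // adjustPTS() rebuilds timestamps from the frame counter passed in:
        // fps-based spacing for video, a fixed 1024-tick duration per audio
        // packet (the AAC frame size when the time base is 1/sample_rate),
        // so a per-stream counter is used here.
        int64_t v_cnt = 0, a_cnt = 0;
        AVPacket pkt;
        av_init_packet(&pkt);
        while (av_read_frame(in_ctx, &pkt) == 0){
            if (v && pkt.stream_index == v->index)      out.writeFrame(&pkt, v_cnt++);
            else if (a && pkt.stream_index == a->index) out.writeFrame(&pkt, a_cnt++);
            av_packet_unref(&pkt);
        }

        out.writeTrailer();
        avformat_close_input(&in_ctx);
        return true;
    }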