From eeb89c114214678a2968c151b91440374ff50572 Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Fri, 11 Oct 2019 09:25:10 +0800
Subject: [PATCH] fix "no such file" error when recording
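
FormatOut now tracks its output streams by index (v_idx_/a_idx_) instead of a single
v_s_ pointer plus a std::vector<AVStream*> of input streams, copies stream settings with
avcodec_parameters_copy() instead of the deprecated avcodec_copy_context(), and passes
AVPacket* through encode()/writeFrame(). The sketch below shows how a caller might drive
the updated recording path; it is illustrative only and assumes an already-opened demuxer
(in_ctx with its video/audio AVStream pointers) and a FormatOut.hpp header next to this
source file — recordToFile and those names are not part of this patch.

    // Hypothetical caller: copy packets from an opened input into an mp4 file.
    #include "FormatOut.hpp"            // assumed header for ffwrapper::FormatOut
    extern "C" {
    #include <libavformat/avformat.h>
    }

    bool recordToFile(AVFormatContext *in_ctx, AVStream *in_v, AVStream *in_a,
                      const char *out_file)
    {
        ffwrapper::FormatOut out;

        // JustWriter() now takes explicit video/audio input streams
        // (audio may be NULL) instead of a std::vector<AVStream*>.
        if (!out.JustWriter(in_v, in_a, out_file))
            return false;

        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        int64_t frame_cnt = 0;
        while (av_read_frame(in_ctx, &pkt) >= 0) {
            // writeFrame() now takes AVPacket*; adjustPTS() remaps
            // pkt->stream_index to the output index chosen in addStream().
            out.writeFrame(&pkt, frame_cnt++);
            av_packet_unref(&pkt);
        }
        // Writing the trailer / closing the output is omitted here.
        return true;
    }

Note that frame_cnt here counts every packet; since adjustPTS() synthesizes PTS from this
counter for both video and audio, a real caller may prefer separate per-stream counters.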
---
csrc/ffmpeg/format/FormatOut.cpp | 271 +++++++++++++++++++++++-------------------------------
 1 file changed, 116 insertions(+), 155 deletions(-)
diff --git a/csrc/ffmpeg/format/FormatOut.cpp b/csrc/ffmpeg/format/FormatOut.cpp
index 0c6958f..60efd59 100644
--- a/csrc/ffmpeg/format/FormatOut.cpp
+++ b/csrc/ffmpeg/format/FormatOut.cpp
@@ -16,8 +16,6 @@
#include "../configure/conf.hpp"
#include "../property/VideoProp.hpp"
-#include "../data/CodedData.hpp"
-#include "../data/FrameData.hpp"
#include "../../common/gpu/info.h"
@@ -26,13 +24,15 @@
namespace ffwrapper{
FormatOut::FormatOut()
:ctx_(NULL)
- ,v_s_(NULL)
+ ,v_idx_(-1)
+ ,a_idx_(-1)
,enc_ctx_(NULL)
,sync_opts_(0)
,record_(false)
,fps_(0.0f)
,format_name_("mp4")
- ,streams_(NULL)
+ ,in_v_stream_(NULL)
+ ,in_a_stream_(NULL)
{}
FormatOut::~FormatOut()
@@ -51,7 +51,6 @@
avformat_free_context(ctx_);
ctx_ = NULL;
}
- v_s_ = NULL;
sync_opts_ = 0;
}
@@ -152,7 +151,8 @@
}
logIt("use encoder %s", codec->name);
- v_s_ = avformat_new_stream(ctx_, codec);
+ AVStream *v = avformat_new_stream(ctx_, codec);
+ v_idx_ = 0;
enc_ctx_ = avcodec_alloc_context3(codec);
@@ -182,7 +182,7 @@
logIt("can't open output codec: %s", getAVErrorDesc(err).c_str());
return false;
}
- err = avcodec_parameters_from_context(v_s_->codecpar, enc_ctx_);
+ err = avcodec_parameters_from_context(v->codecpar, enc_ctx_);
if (err < 0) {
logIt("can't avcodec_parameters_from_context: %s", getAVErrorDesc(err).c_str());
return false;
@@ -197,20 +197,25 @@
return true;
}
+
+ AVStream *FormatOut::getStream(){
+ if (v_idx_ == -1) return NULL;
+ return ctx_->streams[v_idx_];
+ }
const AVCodecContext *FormatOut::getCodecContext()const{
return enc_ctx_;
}
- int FormatOut::encode(AVPacket &pkt, AVFrame *frame){
+ int FormatOut::encode(AVPacket *pkt, AVFrame *frame){
AVStream *out = getStream();
frame->quality = enc_ctx_->global_quality;
frame->pict_type = AV_PICTURE_TYPE_NONE;
- pkt.data = NULL;
- pkt.size = 0;
+ pkt->data = NULL;
+ pkt->size = 0;
int ret = avcodec_send_frame(enc_ctx_, frame);
if(ret < 0){
@@ -219,50 +224,26 @@
}
while(ret >= 0){
- ret = avcodec_receive_packet(enc_ctx_, &pkt);
+ ret = avcodec_receive_packet(enc_ctx_, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
}else if (ret < 0) {
logIt("avcodec_receive_packet : %s", getAVErrorDesc(ret).c_str());
return -1;
}else{
- if(pkt.pts == AV_NOPTS_VALUE
+ if(pkt->pts == AV_NOPTS_VALUE
&& !(enc_ctx_->codec->capabilities & AV_CODEC_CAP_DELAY))
{
- pkt.pts = sync_opts_++;
+ pkt->pts = sync_opts_++;
}
- av_packet_rescale_ts(&pkt, enc_ctx_->time_base, out->time_base);
+ av_packet_rescale_ts(pkt, enc_ctx_->time_base, out->time_base);
// printf("pkt pts: %lld\n", pkt.pts);
- return 1;
+ return 0;
}
}
- return 0;
- }
-
- int FormatOut::encode(std::shared_ptr<CodedData> &data,
- std::shared_ptr<FrameData> &frame_data){
-
- AVStream *out = getStream();
- AVCodecContext *enc_ctx = out->codec;
- data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size);
-
- AVPacket &pkt(data->getAVPacket());
- AVFrame *frame = frame_data->getAVFrame();
-
- return encode(pkt, frame);
- }
-
- int FormatOut::encode(std::shared_ptr<CodedData> &data,AVFrame *frame){
-
- AVStream *out = getStream();
- AVCodecContext *enc_ctx = out->codec;
- data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size);
-
- AVPacket &pkt(data->getAVPacket());
-
- return encode(pkt, frame);
+ return -2;
}
//////////////////////////////////////////////////////////////////////////
@@ -296,55 +277,70 @@
return true;
}
- bool FormatOut::copyCodecFromIn(std::vector<AVStream*> in){
- auto count = in.size();
-
- for(int i = 0; i < count; i++)
-        { // create an output stream for each input stream
- AVStream *in_stream = in[i];
- AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
- if(!out_stream)
- {
- logIt("Failed allocating output stream.\n");
- return false;
- }
-
- if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
- v_s_ = out_stream;
- }
-            // copy the input stream's codec settings to the output stream
- auto ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
- if(ret<0)
- {
- logIt("Failed to copy context from input to output stream codec context\n");
- return false;
- }
- out_stream->codec->codec_tag = 0;
-
- if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
- out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-
+ bool FormatOut::addStream(AVStream *s){
+ AVStream *in_stream = s;
+
+ AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
+ if(!out_stream)
+ {
+ logIt("Failed allocating output stream.\n");
+ return false;
}
-
- streams_ = in;
-
+        // copy the input stream's codec parameters to the output stream
+ auto ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
+ if(ret<0)
+ {
+ logIt("Failed to copy context from input to output stream codec context\n");
+ return false;
+ }
+ out_stream->codecpar->codec_tag = 0;
+
+ if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
+ out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
return true;
}
- bool FormatOut::JustWriter(std::vector<AVStream*> in, const char *filename){
+ bool FormatOut::copyCodecFromIn(AVStream *v, AVStream *a){
+ if (v){
+ v_idx_ = 0;
+ in_v_stream_ = v;
+
+ if (!addStream(v)){
+ logIt("format out add video stream error");
+ return false;
+ }else{
+ logIt("copy video from instream");
+ }
+ }
+ if (a){
+ a_idx_ = 1;
+ in_a_stream_ = a;
+ if (!addStream(a)){
+ logIt("format out add audio stream error");
+ return false;
+ }else{
+ logIt("copy audio from instream");
+ }
+ }
+ return true;
+ }
+
+ bool FormatOut::JustWriter(AVStream *v, AVStream *a, const char *filename){
if(ctx_){
clear();
}
bool flag = open(NULL, format_name_.c_str());
- flag = copyCodecFromIn(in) && flag;
+ flag = copyCodecFromIn(v, a) && flag;
if(!flag){
logIt("FormatOut JustWriter error from in");
return false;
}
flag = openResource(filename, 2);
+
if(flag){
AVDictionary *avdic = NULL;
char option_key[]="movflags";
@@ -352,8 +348,8 @@
av_dict_set(&avdic,option_key,option_value,0);
flag = writeHeader(&avdic);
av_dict_free(&avdic);
-
}
+
return flag;
}
@@ -393,114 +389,79 @@
getAVErrorDesc(ret).c_str());
return false;
}
+
record_ = true;
return true;
}
- void FormatOut::adjustVideoPTS(AVPacket &pkt, const int64_t &frame_cnt){
- int64_t time_stamp = frame_cnt;
-
- pkt.pos = -1;
- pkt.stream_index = 0;
-
- //Write PTS
- AVRational time_base = getStream()->time_base;
-
- AVRational time_base_q = { 1, AV_TIME_BASE };
- //Duration between 2 frames (us)
-        // int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / fps_); // internal timestamp
-        int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_); // internal timestamp
- //Parameters
- pkt.pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base);
- pkt.dts = pkt.pts;
- pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
-
- // logIt("FRAME ID: %lld, PTS : %lld, DTS : %lld", frame_cnt, pkt.pts, pkt.dts);
- }
-
- void FormatOut::adjustPTS(AVPacket &pkt, const int64_t &frame_cnt){
- if (streams_.size() == 1){
- return adjustVideoPTS(pkt, frame_cnt);
- }
-
- if (pkt.stream_index >= streams_.size()){
- logIt("adjustPTS pkt stream index too much");
- return;
- }
-
+ void FormatOut::adjustPTS(AVPacket *pkt, const int64_t &frame_cnt){
AVStream *in_stream,*out_stream;
-
- in_stream = streams_[pkt.stream_index];
- out_stream = ctx_->streams[pkt.stream_index];
-
- // logIt("stream %d time_base %d : %d", pkt.stream_index, in_stream->time_base.num, in_stream->time_base.den);
- // logIt("out time_base %d : %d", out_stream->time_base.num, out_stream->time_base.den);
-
- std::string type("video");
- if (in_stream->codecpar->codec_type == 1){
- type = "audio";
+ int out_idx = -1;
+ std::vector<AVStream*> in_streams{in_v_stream_, in_a_stream_};
+ for (auto i : in_streams){
+ if (i->index == pkt->stream_index){
+ if (i->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
+ out_idx = v_idx_;
+ in_stream = i;
+ break;
+ }else if (i->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){
+ in_stream = i;
+ out_idx = a_idx_;
+ break;
+ }
+ }
}
+ if (out_idx == -1) return;
+ out_stream = ctx_->streams[out_idx];
+ pkt->stream_index = out_idx;
- // logIt("BEFORE stream %d type: %s, pts: %lld, dts: %lld, duration: %lld",
- // pkt.stream_index, type.c_str(), pkt.pts, pkt.pts, pkt.duration);
- //copy packet
-        // convert the PTS/DTS timing to the output time base
- pkt.pts = av_rescale_q_rnd(pkt.pts,in_stream->time_base,out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
- pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
- //printf("pts %d dts %d base %d\n",pkt.pts,pkt.dts, in_stream->time_base);
- pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
- pkt.pos = -1;
+ int64_t time_stamp = frame_cnt;
- // logIt("AFTER stream %d type: %s, pts: %lld, dts: %lld, duration: %lld",
- // pkt.stream_index, type.c_str(), pkt.pts, pkt.pts, pkt.duration);
+ if (out_idx == v_idx_){
+
+ pkt->pos = -1;
+ AVRational time_base = ctx_->streams[out_idx]->time_base;
-        // // Not every packet in this while loop is a video frame; just log when a video frame arrives, nothing more.
- // if(pkt.stream_index==video_index)
- // {
- // printf("Receive %8d video frames from input URL\n",frame_index);
- // frame_index++;
- // }
-
-        // // Write the packet data to the file.
- // ret = av_interleaved_write_frame(ofmt_ctx,&pkt);
- // if(ret < 0)
- // {
- // /**
-        //     When the network is flaky, packets can arrive out of order; the resulting pts disorder makes
-        //     av_interleaved_write_frame return error -22. For now, simply drop these late frames.
-        //     If most packets carry no pts at all, fill in the timestamps yourself as needed (e.g. previous frame's pts + 1) before writing.
- // */
- // if(ret==-22){
- // continue;
- // }else{
- // printf("Error muxing packet.error code %d\n" , ret);
- // break;
- // }
+ AVRational time_base_q = { 1, AV_TIME_BASE };
+            int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_); // internal timestamp
+ pkt->pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base);
+ pkt->dts = pkt->pts;
+ pkt->duration = av_rescale_q(calc_duration, time_base_q, time_base);
- // }
-
+ }else if (out_idx == a_idx_){
+
+ pkt->duration = 1024;
+ pkt->pts = pkt->dts = pkt->duration * time_stamp;
+
+ }
+ // logIt("BEFORE in stream timebase %d:%d, out timebase %d:%d,
+ // pts: %lld, dts: %lld, duration: %lld",
+ // in_stream->time_base.num, in_stream->time_base.den,
+ // out_stream->time_base.num, out_stream->time_base.den,
+ // pkt->pts, pkt->dts, pkt->duration);
+
}
- bool FormatOut::writeFrame(AVPacket &pkt, const int64_t &frame_cnt,
+ bool FormatOut::writeFrame(AVPacket *pkt, const int64_t &frame_cnt,
bool interleaved/* = true*/){
adjustPTS(pkt, frame_cnt);
auto ret = writeFrame2(pkt, interleaved);
if (!ret){
logIt("write to file failed, pkt.pts: %lld, dts: %lld, frame count: %d",
- pkt.pts, pkt.dts, frame_cnt);
+ pkt->pts, pkt->dts, frame_cnt);
}
return ret;
}
- bool FormatOut::writeFrame2(AVPacket &pkt, bool interleaved){
+ bool FormatOut::writeFrame2(AVPacket *pkt, bool interleaved){
int ret = 0;
if(interleaved){
- ret = av_interleaved_write_frame(ctx_, &pkt);
+ ret = av_interleaved_write_frame(ctx_, pkt);
}else{
// returns 1 if flushed and there is no more data to flush
- ret = av_write_frame(ctx_, &pkt);
+ ret = av_write_frame(ctx_, pkt);
}
if(ret < -22 || ret == 0){
--
Gitblit v1.8.0