#include "FormatOut.hpp" // assumed: the matching header for this translation unit

#include "../configure/conf.hpp"

#include "../property/VideoProp.hpp"
#include "../data/CodedData.hpp"
#include "../data/FrameData.hpp"

#include "../../common/gpu/info.h"

extern "C"{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}
namespace ffwrapper{

FormatOut::FormatOut()
:ctx_(NULL)
,v_s_(NULL)
,v_idx_(-1)
,a_idx_(-1)
,enc_ctx_(NULL)
,sync_opts_(0)
,record_(false)
,fps_(0.0f)
,format_name_("mp4")
,in_ctx_(NULL)
{}

FormatOut::~FormatOut()
{
    clear();
}

void FormatOut::clear(){
    if(ctx_){
        avformat_free_context(ctx_);
        ctx_ = NULL;
    }
    v_s_ = NULL;
    sync_opts_ = 0;
}

// NOTE: the head of this function is missing from the source; the signature
// below is an assumption. `codec` is the encoder chosen by the caller.
bool FormatOut::openCodec(AVCodec *codec){
    if (!codec){
        return false;
    }
    logIt("use encoder %s", codec->name);

    AVStream *v = avformat_new_stream(ctx_, codec);
    v_idx_ = 0;

    enc_ctx_ = avcodec_alloc_context3(codec);

    // NOTE: the original code that fills enc_ctx_ (resolution, time base,
    // bit rate, ...) is not shown in the source.
    int err = avcodec_open2(enc_ctx_, codec, NULL);
    if (err < 0) {
        logIt("can't open output codec: %s", getAVErrorDesc(err).c_str());
        return false;
    }

    err = avcodec_parameters_from_context(v->codecpar, enc_ctx_);
    if (err < 0) {
        logIt("can't avcodec_parameters_from_context: %s", getAVErrorDesc(err).c_str());
        return false;
    }

    return true;
}

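// The parameters the encoder needs are set on enc_ctx_ between
// avcodec_alloc_context3() and avcodec_open2(). A minimal sketch of what
// typically goes there -- every value below is an illustrative assumption,
// not the original configuration:
//
//   enc_ctx_->width     = 1920;
//   enc_ctx_->height    = 1080;
//   enc_ctx_->pix_fmt   = AV_PIX_FMT_YUV420P;
//   enc_ctx_->time_base = av_make_q(1, 25);   // 25 fps
//   enc_ctx_->gop_size  = 50;
//   if (ctx_->oformat->flags & AVFMT_GLOBALHEADER)
//       enc_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
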
AVStream *FormatOut::getStream(){
    if (v_idx_ == -1) return NULL;
    return ctx_->streams[v_idx_];
}

const AVCodecContext *FormatOut::getCodecContext()const{
    return enc_ctx_;
}

int FormatOut::encode(AVPacket *pkt, AVFrame *frame){

    AVStream *out = getStream();

    frame->quality = enc_ctx_->global_quality;
    frame->pict_type = AV_PICTURE_TYPE_NONE;

    pkt->data = NULL;
    pkt->size = 0;

    int ret = avcodec_send_frame(enc_ctx_, frame);
    if(ret < 0){
        logIt("avcodec_send_frame : %s", getAVErrorDesc(ret).c_str());
        return -1;
    }

    while(ret >= 0){
        ret = avcodec_receive_packet(enc_ctx_, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }else if (ret < 0) {
            logIt("avcodec_receive_packet : %s", getAVErrorDesc(ret).c_str());
            return -1;
        }else{
            // An encoder without delay may leave the PTS unset; fall back
            // to a simple frame counter.
            if(pkt->pts == AV_NOPTS_VALUE
                && !(enc_ctx_->codec->capabilities & AV_CODEC_CAP_DELAY))
            {
                pkt->pts = sync_opts_++;
            }
            av_packet_rescale_ts(pkt, enc_ctx_->time_base, out->time_base);
            // printf("pkt pts: %lld\n", pkt->pts);
            return 0;
        }
    }

    return 0;
}

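// Usage sketch for the send/receive pattern above (hypothetical caller, not
// part of the original API): encode() returns as soon as one packet is
// available, so a caller feeds frames and muxes whatever comes out, checking
// pkt.size to tell a real packet from a drained encoder.
//
//   AVPacket pkt;
//   av_init_packet(&pkt);
//   if (out.encode(&pkt, frame) >= 0 && pkt.size > 0){
//       out.writeFrame(&pkt, frame_cnt++);
//       av_packet_unref(&pkt);
//   }
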
int FormatOut::encode(std::shared_ptr<CodedData> &data,
                      std::shared_ptr<FrameData> &frame_data){

    AVStream *out = getStream();
    AVCodecContext *enc_ctx = out->codec;
    data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size);

    AVPacket &pkt(data->getAVPacket());
    AVFrame *frame = frame_data->getAVFrame();

    return encode(&pkt, frame);
}

int FormatOut::encode(std::shared_ptr<CodedData> &data,AVFrame *frame){

    AVStream *out = getStream();
    AVCodecContext *enc_ctx = out->codec;
    data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size);

    AVPacket &pkt(data->getAVPacket());

    return encode(&pkt, frame);
}

//////////////////////////////////////////////////////////////////////////

// NOTE: only the tail (`return true;`) of this function survives in the
// source; the body is an assumed reconstruction. JustWriter() calls it with
// flags == AVIO_FLAG_WRITE (2) to open the output file for writing.
bool FormatOut::openResource(const char *filename, int flags){
    if (!ctx_) return false;

    if (!(ctx_->oformat->flags & AVFMT_NOFILE)){
        int err = avio_open(&ctx_->pb, filename, flags);
        if (err < 0){
            logIt("avio_open failed: %s", getAVErrorDesc(err).c_str());
            return false;
        }
    }
    return true;
}

bool FormatOut::copyCodecFromIn(AVFormatContext* in){

    for(int i = 0; i < in->nb_streams; i++)
    {   // Create an output stream for every input stream.
        AVStream *in_stream = in->streams[i];
        AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec);
        if(!out_stream)
        {
            logIt("Failed to allocate output stream\n");
            return false;
        }

        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
            v_s_ = out_stream;
        }
        // Copy the codec settings from the input stream to the output stream.
        auto ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if(ret < 0)
        {
            logIt("Failed to copy context from input to output stream codec context\n");
            return false;
        }

        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
            v_idx_ = i;
        }else{
            a_idx_ = i;
        }

        out_stream->codecpar->codec_tag = out_stream->codec->codec_tag = 0;

        if(ctx_->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    }

    in_ctx_ = in;

    return true;
}

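// avcodec_copy_context() on AVStream::codec is deprecated since FFmpeg 3.1;
// on newer FFmpeg the same per-stream copy goes through the codec parameters
// instead. A minimal sketch of the equivalent loop body:
//
//   AVStream *out_stream = avformat_new_stream(ctx_, NULL);
//   int ret = avcodec_parameters_copy(out_stream->codecpar,
//                                     in_stream->codecpar);
//   if (ret < 0) return false;
//   out_stream->codecpar->codec_tag = 0;
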
bool FormatOut::JustWriter(AVFormatContext *in, const char *filename){
    if(ctx_){
        clear();
    }

    // NOTE: assumed reconstruction of the missing middle of this function:
    // allocate the output context, then mirror the input streams into it.
    int err = avformat_alloc_output_context2(&ctx_, NULL,
                                             format_name_.c_str(), filename);
    bool flag = (err >= 0) && copyCodecFromIn(in);
    if(!flag){
        logIt("JustWriter: init output from input failed");
        return false;
    }

    flag = openResource(filename, 2);

    if(flag){
        AVDictionary *avdic = NULL;
        char option_key[]="movflags";
        // NOTE: assumed value -- fragmented mp4 keeps the file playable even
        // if recording is cut off; the original line is missing.
        char option_value[]="frag_keyframe+empty_moov";
        av_dict_set(&avdic, option_key, option_value, 0);
        flag = writeHeader(&avdic);
        av_dict_free(&avdic);
    }

    return flag;
}

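// Usage sketch (the input names are assumptions -- any AVFormatContext opened
// for reading works): mirror an input's streams into an mp4 file, then pump
// packets through writeFrame().
//
//   AVFormatContext *in = NULL;
//   if (avformat_open_input(&in, "rtsp://camera/stream", NULL, NULL) == 0
//       && avformat_find_stream_info(in, NULL) >= 0){
//       FormatOut out;
//       if (out.JustWriter(in, "/tmp/record.mp4")){
//           // read packets from `in` and pass them to out.writeFrame(...)
//       }
//   }
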
// NOTE: the head of this function is missing; the name and signature are
// assumed from the call in JustWriter() above.
bool FormatOut::writeHeader(AVDictionary **options/* = NULL*/){
    auto ret = avformat_write_header(ctx_, options);
    if(ret < 0){
        logIt("avformat_write_header failed : %s",
            getAVErrorDesc(ret).c_str());
        return false;
    }

    record_ = true;
    return true;
}

void FormatOut::adjustVideoPTS(AVPacket &pkt, const int64_t &frame_cnt){
    int64_t time_stamp = frame_cnt;

    pkt.pos = -1;
    pkt.stream_index = 0;

    // Write the PTS from the frame counter.
    AVRational time_base = getStream()->time_base;

    AVRational time_base_q = { 1, AV_TIME_BASE };
    // Duration between two frames in microseconds (internal time base).
    int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_);
    // Rescale from 1/AV_TIME_BASE units to the stream's time base.
    pkt.pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base);
    pkt.dts = pkt.pts;
    pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);

    // logIt("FRAME ID: %lld, PTS : %lld, DTS : %lld", frame_cnt, pkt.pts, pkt.dts);
}

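// Worked example of the arithmetic above, assuming fps_ == 25 and a stream
// time base of 1/90000: calc_duration = 1000000/25 = 40000 us, so frame 100
// gets pts = av_rescale_q(100 * 40000, {1,1000000}, {1,90000}) = 360000, and
// every packet gets duration = 40000 * 90000 / 1000000 = 3600 ticks.
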
void FormatOut::adjustPTS(AVPacket *pkt, const int64_t &frame_cnt){
    if (pkt->stream_index >= ctx_->nb_streams){
        logIt("adjustPTS: pkt stream index out of range");
        return;
    }

    AVStream *in_stream,*out_stream;

    in_stream = in_ctx_->streams[pkt->stream_index];
    out_stream = ctx_->streams[pkt->stream_index];

    std::string type("video");
    if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){
        type = "audio";
    }

    // if (type == "audio")
    //     logIt("BEFORE stream %d type: %s, pts: %lld, dts: %lld, duration: %lld",
    //         pkt->stream_index, type.c_str(), pkt->pts, pkt->dts, pkt->duration);

    // Rescale PTS/DTS from the input stream's time base to the output's.
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
    pkt->pos = -1;

    // if (type == "audio")
    //     logIt("AFTER stream %d type: %s, pts: %lld, dts: %lld, duration: %lld",
    //         pkt->stream_index, type.c_str(), pkt->pts, pkt->dts, pkt->duration);

    // Notes kept from the original remuxing loop this code was derived from:
    // not every packet there was a video frame -- video frames were merely
    // counted. When writing with av_interleaved_write_frame(), an unstable
    // network can deliver packets out of order; the disordered PTS makes the
    // call fail with -22 (EINVAL), and such late packets were simply
    // discarded. If most packets carry no PTS at all, timestamps should
    // instead be filled in by hand (e.g. previous frame's PTS + 1) before
    // writing.
}

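// The three rescale calls above are essentially av_packet_rescale_ts() done
// by hand (PASS_MINMAX preserves AV_NOPTS_VALUE the same way that function
// skips it); a minimal equivalent:
//
//   av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
//   pkt->pos = -1;
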
bool FormatOut::writeFrame(AVPacket *pkt, const int64_t &frame_cnt,
                           bool interleaved/* = true*/){

    adjustPTS(pkt, frame_cnt);
    auto ret = writeFrame2(pkt, interleaved);
    if (!ret){
        logIt("write to file failed, pkt.pts: %lld, dts: %lld, frame count: %lld",
            pkt->pts, pkt->dts, frame_cnt);
    }
    return ret;
}

bool FormatOut::writeFrame2(AVPacket *pkt, bool interleaved){

    int ret = 0;
    if(interleaved){
        ret = av_interleaved_write_frame(ctx_, pkt);
    }else{
        // av_write_frame() returns 1 if flushed and there is no more data to flush.
        ret = av_write_frame(ctx_, pkt);
    }

    if(ret < -22 || ret == 0){