| | |
| | | } |
| | | |
| | | out_ = new FormatOut(in_->getFPS(), "mp4"); |
| | | AVStream *vs = in_->getStream(0); |
| | | AVStream *as = in_->getStream(1); |
| | | if (!vs) return -1; |
| | | if (audio && !as) logIt("there is no audio"); |
| | | |
| | | std::vector<AVStream*> in; |
| | | in.push_back(vs); |
| | | if (audio && as) in.push_back(as); |
| | | |
| | | file_path_ = dir_ + "/" + sole::uuid4().base62() + ".mp4"; |
| | | auto ret = out_->JustWriter(in, file_path_.c_str()); |
| | | auto ret = out_->JustWriter(in_->getFromatContext(), file_path_.c_str()); |
| | | if (ret){ |
| | | return 0; |
| | | } |
| | |
| | | return 1; |
| | | } |
| | | |
| | | int64_t cur = cur_frame++; |
| | | AVPacket &op = pkt.data->getAVPacket(); |
| | | AVPacket np(op); |
| | | av_copy_packet(&np, &op); |
| | | auto ret = out_->writeFrame(np, cur); |
| | | av_packet_unref(&np); |
| | | if (!ret) return -1; |
| | | |
| | | int64_t cur = cur_frame; |
| | | if (in_->isVideoPkt(&np)){ |
| | | |
| | | if(pkt.id == id_frame_){ |
| | | id_frame_in_file_ = cur_frame-1; |
| | | id_frame_in_file_ = cur_frame; |
| | | } |
| | | cur_frame++; |
| | | } |
| | | |
| | | auto ret = out_->writeFrame(&np, cur); |
| | | av_packet_unref(&np); |
| | | |
| | | if (!ret) return -1; |
| | | |
| | | // logIt("WRITE FRAME ID: %d, RECORD ID: %d", pkt.id, id_frame_); |
| | | return 0; |
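| | | // Editor's note: a minimal sketch, assuming FFmpeg >= 3.x, of the same
| | | // copy-write-release pattern built on av_packet_ref() rather than the
| | | // deprecated av_copy_packet(); `out_`, `op` and `cur` follow the names used
| | | // in the surrounding code, and this is an illustration only:
| | | //
| | | //   AVPacket np;
| | | //   av_init_packet(&np);
| | | //   if (av_packet_ref(&np, &op) == 0) {        // share op's refcounted buffer
| | | //       bool ok = out_->writeFrame(&np, cur);  // rescales PTS/DTS and muxes
| | | //       av_packet_unref(&np);                  // drop our reference
| | | //       if (!ok) return -1;
| | | //   }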
| | |
| | | |
| | | if(id_frame_ == -1){ |
| | | // wait for an I-frame (keyframe)
| | | if (!audio_ && in_->isAudioPkt(pkt.data->getAVPacket())){ |
| | | if (!audio_ && in_->isAudioPkt(&pkt.data->getAVPacket())){ |
| | | return 0; |
| | | } |
| | | |
| | | if (list_pkt_.empty()) { |
| | | AVPacket &avpkt = pkt.data->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | return -1; |
| | | } |
| | | } |
| | | |
| | | maybe_dump_gop(); |
| | |
| | | std::lock_guard<std::mutex> locker(mutex_pkt_); |
| | | bool i = false; |
| | | for (auto &p : lst){ |
| | | if (!i){ |
| | | if (!audio_ && in_->isAudioPkt(p.data->getAVPacket())){ |
| | | if (!audio_ && in_->isAudioPkt(&p.data->getAVPacket())){ |
| | | continue; |
| | | } |
| | | AVPacket &avpkt = p.data->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | continue; |
| | | } |
| | | i = true; |
| | | } |
| | | |
| | | list_pkt_.push_back(p); |
| | |
| | | while (list_pkt_.size() > minduration) { |
| | | list_pkt_.pop_front(); |
| | | while(!list_pkt_.empty()){ |
| | | auto &cache = list_pkt_.front(); |
| | | AVPacket &avpkt = cache.data->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | auto &i = list_pkt_.front(); |
| | | if (!(i.data->getAVPacket().flags & AV_PKT_FLAG_KEY)){ |
| | | list_pkt_.pop_front(); |
| | | }else{ |
| | | break; |
| | |
| | | |
| | | #include "../common/callback.hpp" |
| | | |
| | | #include "../common.hpp" |
| | | |
| | | struct AVPacket; |
| | | |
| | | namespace ffwrapper{ |
| | | class FormatIn; |
| | | class FormatOut; |
| | | |
| | | class CodedData; |
| | | } |
| | | |
| | | namespace cffmpeg_wrap{ |
| | | namespace buz{ |
| | | // Cached video packets, waiting for a fire trigger to start recording
| | | typedef struct _cache_pkt{ |
| | | std::shared_ptr<ffwrapper::CodedData> data; |
| | | int64_t id; |
| | | }CPacket; |
| | | |
| | | |
| | | class Recorder{ |
| | | public: |
New file |
| | |
| | | #ifndef _cffmpeg_common_hpp_ |
| | | #define _cffmpeg_common_hpp_ |
| | | |
| | | #include <stdint.h> |
| | | #include <memory> |
| | | |
| | | namespace ffwrapper{ |
| | | class CodedData; |
| | | } |
| | | // Cached video packets
| | | typedef struct _cache_pkt{ |
| | | std::shared_ptr<ffwrapper::CodedData> data; |
| | | int64_t id; |
| | | }CPacket; |
| | | |
| | | #endif |
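| | | // Editor's note: a minimal sketch, assuming a std::list<CPacket> cache like the
| | | // ones used elsewhere in this change, of how an entry pairs the refcounted
| | | // packet data with the id of the frame it belongs to:
| | | //
| | | //   std::list<CPacket> cache;
| | | //   void push(const std::shared_ptr<ffwrapper::CodedData> &data, int64_t id){
| | | //       cache.push_back({data, id});   // the shared_ptr keeps the packet alive
| | | //       while (cache.size() > 200)     // bound the cache, roughly as rec::shrinkCache does
| | | //           cache.pop_front();
| | | //   }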
| | |
| | | #include "../configure/conf.hpp" |
| | | |
| | | #include "../property/VideoProp.hpp" |
| | | #include "../data/CodedData.hpp" |
| | | #include "../data/FrameData.hpp" |
| | | |
| | | #include "../../common/gpu/info.h" |
| | | |
| | |
| | | vs_idx_ = i; |
| | | |
| | | auto in = ctx_->streams[i]; |
| | | if(in->r_frame_rate.num >=1 && in->r_frame_rate.den >= 1){ |
| | | fps_ = av_q2d(in->r_frame_rate); |
| | | }else if(in->avg_frame_rate.num >=1 && in->avg_frame_rate.den >= 1){ |
| | | if(in->avg_frame_rate.num >=1 && in->avg_frame_rate.den >= 1){ |
| | | fps_ = av_q2d(in->avg_frame_rate); |
| | | }else if(in->r_frame_rate.num >=1 && in->r_frame_rate.den >= 1){ |
| | | fps_ = av_q2d(in->r_frame_rate); |
| | | } |
| | | logIt("video stream time base %d : %d", in->time_base.num, in->time_base.den); |
| | | logIt("in stream fps %f, time_base: %d : %d", fps_, in->time_base.num, in->time_base.den); |
| | | } |
| | | if (type == AVMEDIA_TYPE_AUDIO){ |
| | | as_idx_ = i; |
| | | logIt("audio stream time base %d : %d", ctx_->streams[i]->time_base.num, ctx_->streams[i]->time_base.den); |
| | | } |
| | | } |
| | | |
| | | return true; |
| | | } |
| | | |
| | | bool FormatIn::openCodec(const int type, AVDictionary **options){ |
| | | int stream_index = -1; |
| | | switch(type){ |
| | | case AVMEDIA_TYPE_VIDEO: |
| | | stream_index = vs_idx_; |
| | | break; |
| | | default: |
| | | break; |
| | | } |
| | | if(stream_index < 0){ |
| | | logIt("open input %s codec need correct stream",ctx_->filename); |
| | | bool FormatIn::openCodec(AVDictionary **options){ |
| | | if (vs_idx_ == -1) return false; |
| | | |
| | | return false; |
| | | } |
| | | |
| | | AVStream *s = ctx_->streams[stream_index]; |
| | | AVStream *s = ctx_->streams[vs_idx_]; |
| | | |
| | | AVCodecParameters *codecpar = s->codecpar; |
| | | AVCodec *dec = NULL; |
| | |
| | | return ctx_->streams[vs_idx_]; |
| | | } |
| | | |
| | | if (type == ctx_->streams[vs_idx_]->codecpar->codec_type) |
| | | if (vs_idx_ > -1 && type == ctx_->streams[vs_idx_]->codecpar->codec_type) |
| | | return ctx_->streams[vs_idx_]; |
| | | if (type == ctx_->streams[as_idx_]->codecpar->codec_type) |
| | | if (as_idx_ > -1 && type == ctx_->streams[as_idx_]->codecpar->codec_type) |
| | | return ctx_->streams[as_idx_]; |
| | | |
| | | return NULL; |
| | |
| | | return dec_ctx_; |
| | | } |
| | | |
| | | bool FormatIn::isVideoPkt(AVPacket &pkt){ |
| | | if (pkt.stream_index == vs_idx_){ |
| | | bool FormatIn::isVideoPkt(AVPacket *pkt){ |
| | | if (pkt->stream_index == vs_idx_){ |
| | | return true; |
| | | } |
| | | return false; |
| | | } |
| | | |
| | | bool FormatIn::isAudioPkt(AVPacket &pkt){ |
| | | if (pkt.stream_index == as_idx_){ |
| | | bool FormatIn::isAudioPkt(AVPacket *pkt){ |
| | | if (pkt->stream_index == as_idx_){ |
| | | return true; |
| | | } |
| | | return false; |
| | | } |
| | | |
| | | bool FormatIn::readPacket(AVPacket &pkt_out){ |
| | | int FormatIn::readPacket(AVPacket *pkt_out){ |
| | | |
| | | const int ret = av_read_frame(ctx_, &pkt_out); |
| | | if(ret < 0){ |
| | | return false; |
| | | return av_read_frame(ctx_, pkt_out); |
| | | } |
| | | |
| | | return true; |
| | | |
| | | } |
| | | |
| | | bool FormatIn::readPacket(std::shared_ptr<CodedData> &data){ |
| | | |
| | | AVPacket &pkt(data->getAVPacket()); |
| | | return readPacket(pkt); |
| | | } |
| | | |
| | | int FormatIn::decode(AVFrame* &frame, AVPacket &pkt){ |
| | | int FormatIn::decode(AVFrame* frame, AVPacket *pkt){ |
| | | |
| | | AVStream *in = getStream(); |
| | | |
| | | av_packet_rescale_ts(&pkt, in->time_base, in->codec->time_base); |
| | | int ret = avcodec_send_packet(dec_ctx_, &pkt); |
| | | av_packet_rescale_ts(pkt, in->time_base, in->codec->time_base); |
| | | int ret = avcodec_send_packet(dec_ctx_, pkt); |
| | | if(ret < 0){ |
| | | logIt("avcodec_send_packet error : %s", getAVErrorDesc(ret).c_str()); |
| | | return -1; |
| | |
| | | logIt("decode frame failed : %s", getAVErrorDesc(ret).c_str()); |
| | | return -1; |
| | | }else{ |
| | | return 1; |
| | | } |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | int FormatIn::decode(std::shared_ptr<FrameData> &frame_data, |
| | | std::shared_ptr<CodedData> &data){ |
| | | |
| | | AVFrame *frame = frame_data->getAVFrame(); |
| | | AVPacket &pkt(data->getAVPacket()); |
| | | |
| | | return decode(frame, pkt); |
| | | } |
| | | |
| | | int FormatIn::readFrame(AVFrame* &frame){ |
| | | |
| | | auto data(std::make_shared<CodedData>()); |
| | | if(!readPacket(data)){ |
| | | return -1; |
| | | return -2; |
| | | } |
| | | |
| | | AVPacket &pkt(data->getAVPacket()); |
| | | |
| | | return decode(frame, pkt); |
| | | } |
| | | |
| | | int FormatIn::readFrame(std::shared_ptr<FrameData> &frame_data){ |
| | | |
| | | AVFrame *frame(frame_data->getAVFrame()); |
| | | |
| | | return readFrame(frame); |
| | | } |
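| | | // Editor's note: a sketch of the intended caller sequence for the new
| | | // pointer-based API, assuming (from the call sites later in this change)
| | | // that readPacket() forwards av_read_frame()'s return code and that
| | | // decode() reports a decoded frame with 0:
| | | //
| | | //   AVPacket *pkt = av_packet_alloc();
| | | //   AVFrame  *frm = av_frame_alloc();
| | | //   while (in->readPacket(pkt) == 0) {                      // 0 == success
| | | //       if (in->isVideoPkt(pkt) && in->decode(frm, pkt) == 0) {
| | | //           /* consume frm */
| | | //       }
| | | //       av_packet_unref(pkt);                               // release the payload
| | | //   }
| | | //   av_frame_free(&frm);
| | | //   av_packet_free(&pkt);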
| | | |
| | | |
| | | } |
| | |
| | | namespace ffwrapper{ |
| | | |
| | | class VideoProp; |
| | | class CodedData; |
| | | class FrameData; |
| | | |
| | | class FormatIn |
| | | { |
| | |
| | | int open(const char *filename, AVDictionary **options); |
| | | bool findStreamInfo(AVDictionary **options); |
| | | |
| | | bool openCodec(const int type, AVDictionary **options); |
| | | bool openCodec(AVDictionary **options); |
| | | |
| | | bool readPacket(AVPacket &pkt_out); |
| | | bool readPacket(std::shared_ptr<CodedData> &data); |
| | | int readPacket(AVPacket *pkt_out); |
| | | |
| | | int decode(AVFrame* &frame, AVPacket &pkt); |
| | | int decode(std::shared_ptr<FrameData> &frame_data, |
| | | std::shared_ptr<CodedData> &data); |
| | | int decode(AVFrame* frame, AVPacket *pkt); |
| | | |
| | | int readFrame(AVFrame* &frame); |
| | | int readFrame(std::shared_ptr<FrameData> &frame_data); |
| | | |
| | | bool isVideoPkt(AVPacket &pkt); |
| | | bool isAudioPkt(AVPacket &pkt); |
| | | bool isVideoPkt(AVPacket *pkt); |
| | | bool isAudioPkt(AVPacket *pkt); |
| | | private: |
| | | bool allocCodec(AVCodec *dec, AVStream *s, AVDictionary **options); |
| | | public: |
| | | AVStream *getStream(int type = -1); |
| | | AVCodecContext *getCodecContext(int type = 0); |
| | | AVFormatContext *getFromatContext(){return ctx_;} |
| | | const double getFPS()const{return fps_;} |
| | | private: |
| | | AVFormatContext *ctx_; |
| | |
| | | #include "../configure/conf.hpp" |
| | | |
| | | #include "../property/VideoProp.hpp" |
| | | #include "../data/CodedData.hpp" |
| | | #include "../data/FrameData.hpp" |
| | | |
| | | #include "../../common/gpu/info.h" |
| | | |
| | |
| | | namespace ffwrapper{ |
| | | FormatOut::FormatOut() |
| | | :ctx_(NULL) |
| | | ,v_s_(NULL) |
| | | ,v_idx_(-1) |
| | | ,a_idx_(-1) |
| | | ,enc_ctx_(NULL) |
| | | ,sync_opts_(0) |
| | | ,record_(false) |
| | | ,fps_(0.0f) |
| | | ,format_name_("mp4") |
| | | ,streams_(NULL) |
| | | {} |
| | | |
| | | FormatOut::~FormatOut() |
| | |
| | | avformat_free_context(ctx_); |
| | | ctx_ = NULL; |
| | | } |
| | | v_s_ = NULL; |
| | | sync_opts_ = 0; |
| | | |
| | | } |
| | |
| | | } |
| | | logIt("use encoder %s", codec->name); |
| | | |
| | | v_s_ = avformat_new_stream(ctx_, codec); |
| | | AVStream *v = avformat_new_stream(ctx_, codec); |
| | | v_idx_ = 0; |
| | | |
| | | enc_ctx_ = avcodec_alloc_context3(codec); |
| | | |
| | |
| | | logIt("can't open output codec: %s", getAVErrorDesc(err).c_str()); |
| | | return false; |
| | | } |
| | | err = avcodec_parameters_from_context(v_s_->codecpar, enc_ctx_); |
| | | err = avcodec_parameters_from_context(v->codecpar, enc_ctx_); |
| | | if (err < 0) { |
| | | logIt("can't avcodec_parameters_from_context: %s", getAVErrorDesc(err).c_str()); |
| | | return false; |
| | |
| | | |
| | | } |
| | | |
| | | AVStream *FormatOut::getStream(){ |
| | | if (v_idx_ == -1) return NULL; |
| | | return ctx_->streams[v_idx_]; |
| | | } |
| | | |
| | | const AVCodecContext *FormatOut::getCodecContext()const{ |
| | | return enc_ctx_; |
| | | } |
| | | |
| | | int FormatOut::encode(AVPacket &pkt, AVFrame *frame){ |
| | | int FormatOut::encode(AVPacket *pkt, AVFrame *frame){ |
| | | |
| | | AVStream *out = getStream(); |
| | | |
| | | frame->quality = enc_ctx_->global_quality; |
| | | frame->pict_type = AV_PICTURE_TYPE_NONE; |
| | | |
| | | pkt.data = NULL; |
| | | pkt.size = 0; |
| | | pkt->data = NULL; |
| | | pkt->size = 0; |
| | | |
| | | int ret = avcodec_send_frame(enc_ctx_, frame); |
| | | if(ret < 0){ |
| | |
| | | } |
| | | |
| | | while(ret >= 0){ |
| | | ret = avcodec_receive_packet(enc_ctx_, &pkt); |
| | | ret = avcodec_receive_packet(enc_ctx_, pkt); |
| | | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { |
| | | break; |
| | | }else if (ret < 0) { |
| | | logIt("avcodec_receive_packet : %s", getAVErrorDesc(ret).c_str()); |
| | | return -1; |
| | | }else{ |
| | | if(pkt.pts == AV_NOPTS_VALUE |
| | | if(pkt->pts == AV_NOPTS_VALUE |
| | | && !(enc_ctx_->codec->capabilities & AV_CODEC_CAP_DELAY)) |
| | | { |
| | | pkt.pts = sync_opts_++; |
| | | pkt->pts = sync_opts_++; |
| | | } |
| | | av_packet_rescale_ts(&pkt, enc_ctx_->time_base, out->time_base); |
| | | av_packet_rescale_ts(pkt, enc_ctx_->time_base, out->time_base); |
| | | // printf("pkt pts: %lld\n", pkt.pts); |
| | | return 1; |
| | | } |
| | | |
| | | } |
| | | |
| | | return 0; |
| | | } |
| | | |
| | | int FormatOut::encode(std::shared_ptr<CodedData> &data, |
| | | std::shared_ptr<FrameData> &frame_data){ |
| | | |
| | | AVStream *out = getStream(); |
| | | AVCodecContext *enc_ctx = out->codec; |
| | | data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size); |
| | | |
| | | AVPacket &pkt(data->getAVPacket()); |
| | | AVFrame *frame = frame_data->getAVFrame(); |
| | | |
| | | return encode(pkt, frame); |
| | | } |
| | | |
| | | int FormatOut::encode(std::shared_ptr<CodedData> &data,AVFrame *frame){ |
| | | |
| | | AVStream *out = getStream(); |
| | | AVCodecContext *enc_ctx = out->codec; |
| | | data->refExtraData(enc_ctx->extradata, enc_ctx->extradata_size); |
| | | |
| | | AVPacket &pkt(data->getAVPacket()); |
| | | |
| | | return encode(pkt, frame); |
| | | return -2; |
| | | } |
| | | |
| | | ////////////////////////////////////////////////////////////////////////// |
| | |
| | | return true; |
| | | } |
| | | |
| | | bool FormatOut::copyCodecFromIn(std::vector<AVStream*> in){ |
| | | auto count = in.size(); |
| | | bool FormatOut::copyCodecFromIn(AVFormatContext* in){ |
| | | |
| | | for(int i = 0; i < count; i++) |
| | | for(int i = 0; i < in->nb_streams; i++) |
| | | { // create an output stream for each input stream
| | | AVStream *in_stream = in[i]; |
| | | AVStream *in_stream = in->streams[i]; |
| | | AVStream *out_stream = avformat_new_stream(ctx_, in_stream->codec->codec); |
| | | if(!out_stream) |
| | | { |
| | |
| | | return false; |
| | | } |
| | | |
| | | if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ |
| | | v_s_ = out_stream; |
| | | } |
| | | // copy the codec parameters from the input stream to the output stream
| | | auto ret = avcodec_copy_context(out_stream->codec, in_stream->codec); |
| | | if(ret<0) |
| | |
| | | logIt("Failed to copy context from input to output stream codec context\n"); |
| | | return false; |
| | | } |
| | | out_stream->codec->codec_tag = 0; |
| | | |
| | | if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){ |
| | | v_idx_ = i; |
| | | }else{ |
| | | a_idx_ = i; |
| | | } |
| | | |
| | | out_stream->codecpar->codec_tag = out_stream->codec->codec_tag = 0; |
| | | |
| | | if(ctx_->oformat->flags & AVFMT_GLOBALHEADER) |
| | | out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; |
| | | |
| | | } |
| | | |
| | | streams_ = in; |
| | | in_ctx_ = in; |
| | | |
| | | return true; |
| | | } |
| | | |
| | | bool FormatOut::JustWriter(std::vector<AVStream*> in, const char *filename){ |
| | | bool FormatOut::JustWriter(AVFormatContext *in, const char *filename){ |
| | | if(ctx_){ |
| | | clear(); |
| | | } |
| | |
| | | } |
| | | |
| | | flag = openResource(filename, 2); |
| | | |
| | | if(flag){ |
| | | AVDictionary *avdic = NULL; |
| | | char option_key[]="movflags"; |
| | |
| | | av_dict_free(&avdic); |
| | | |
| | | } |
| | | |
| | | return flag; |
| | | } |
| | | |
| | |
| | | getAVErrorDesc(ret).c_str()); |
| | | return false; |
| | | } |
| | | |
| | | record_ = true; |
| | | return true; |
| | | } |
| | | |
| | | void FormatOut::adjustVideoPTS(AVPacket &pkt, const int64_t &frame_cnt){ |
| | | int64_t time_stamp = frame_cnt; |
| | | |
| | | pkt.pos = -1; |
| | | pkt.stream_index = 0; |
| | | |
| | | //Write PTS |
| | | AVRational time_base = getStream()->time_base; |
| | | |
| | | AVRational time_base_q = { 1, AV_TIME_BASE }; |
| | | //Duration between 2 frames (us) |
| | | // int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / fps_); // internal timestamp (us)
| | | int64_t calc_duration = (int64_t)(AV_TIME_BASE / fps_); // internal timestamp (us)
| | | //Parameters |
| | | pkt.pts = av_rescale_q(time_stamp*calc_duration, time_base_q, time_base); |
| | | pkt.dts = pkt.pts; |
| | | pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base)); |
| | | |
| | | // logIt("FRAME ID: %lld, PTS : %lld, DTS : %lld", frame_cnt, pkt.pts, pkt.dts); |
| | | } |
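| | | // Editor's worked example (assumed values): with fps_ = 25 and an output
| | | // time_base of 1/90000, calc_duration = AV_TIME_BASE / 25 = 40000 us, so each
| | | // frame advances the clock by av_rescale_q(40000, {1,1000000}, {1,90000}) = 3600
| | | // ticks; frame_cnt = 100 therefore yields pts = dts = 360000.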
| | | |
| | | void FormatOut::adjustPTS(AVPacket &pkt, const int64_t &frame_cnt){ |
| | | if (streams_.size() == 1){ |
| | | return adjustVideoPTS(pkt, frame_cnt); |
| | | } |
| | | |
| | | if (pkt.stream_index >= streams_.size()){ |
| | | void FormatOut::adjustPTS(AVPacket *pkt, const int64_t &frame_cnt){ |
| | | if (pkt->stream_index >= ctx_->nb_streams){ |
| | | logIt("adjustPTS pkt stream index too much"); |
| | | return; |
| | | } |
| | | |
| | | AVStream *in_stream,*out_stream; |
| | | |
| | | in_stream = streams_[pkt.stream_index]; |
| | | out_stream = ctx_->streams[pkt.stream_index]; |
| | | in_stream = in_ctx_->streams[pkt->stream_index]; |
| | | out_stream = ctx_->streams[pkt->stream_index]; |
| | | |
| | | // logIt("stream %d time_base %d : %d", pkt.stream_index, in_stream->time_base.num, in_stream->time_base.den); |
| | | // logIt("out time_base %d : %d", out_stream->time_base.num, out_stream->time_base.den); |
| | | |
| | | std::string type("video"); |
| | | if (in_stream->codecpar->codec_type == 1){ |
| | | type = "audio"; |
| | | } |
| | | |
| | | // if (type == "audio") |
| | | // logIt("BEFORE stream %d type: %s, pts: %lld, dts: %lld, duration: %lld", |
| | | // pkt.stream_index, type.c_str(), pkt.pts, pkt.pts, pkt.duration); |
| | | // copy packet
| | | // rescale the PTS/DTS timing into the output time base
| | | pkt.pts = av_rescale_q_rnd(pkt.pts,in_stream->time_base,out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); |
| | | pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); |
| | | //printf("pts %d dts %d base %d\n",pkt.pts,pkt.dts, in_stream->time_base); |
| | | pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); |
| | | pkt.pos = -1; |
| | | |
| | | // rescale the PTS/DTS timing into the output time base
| | | pkt->pts = av_rescale_q_rnd(pkt->pts,in_stream->time_base,out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); |
| | | pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); |
| | | pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base); |
| | | pkt->pos = -1; |
| | | |
| | | // if (type == "audio") |
| | | // logIt("AFTER stream %d type: %s, pts: %lld, dts: %lld, duration: %lld", |
| | | // pkt.stream_index, type.c_str(), pkt.pts, pkt.pts, pkt.duration); |
| | | |
| | | // // Not every packet in this while loop is a video frame; when a video frame arrives, just note it, nothing more.
| | | // if(pkt.stream_index==video_index) |
| | | // { |
| | | // printf("Receive %8d video frames from input URL\n",frame_index); |
| | | // frame_index++; |
| | | // } |
| | | |
| | | // // Write the packet data to the file.
| | | // ret = av_interleaved_write_frame(ofmt_ctx,&pkt); |
| | | // if(ret < 0) |
| | | // { |
| | | // /** |
| | | // When the network is unstable, packets easily arrive out of order; the scrambled
| | | // PTS sequence makes av_interleaved_write_frame return error -22. For now simply drop these late frames.
| | | // If most packets carry no PTS at all, fill in timestamps yourself as appropriate (e.g. previous frame's timestamp + 1) before writing.
| | | // */ |
| | | // if(ret==-22){ |
| | | // continue; |
| | | // }else{ |
| | | // printf("Error muxing packet.error code %d\n" , ret); |
| | | // break; |
| | | // } |
| | | |
| | | // } |
| | | |
| | | } |
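| | | // Editor's sketch of the drop-late-packets policy described in the commented
| | | // block above, expressed against writeFrame2(); it assumes AVERROR(EINVAL)
| | | // (-22 on most platforms) is what av_interleaved_write_frame() returns when
| | | // late packets break PTS/DTS monotonicity:
| | | //
| | | //   int ret = av_interleaved_write_frame(ctx_, pkt);
| | | //   if (ret == AVERROR(EINVAL)) {
| | | //       // out-of-order packet: drop it and keep muxing
| | | //   } else if (ret < 0) {
| | | //       logIt("muxing failed: %s", getAVErrorDesc(ret).c_str());
| | | //   }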
| | | |
| | | bool FormatOut::writeFrame(AVPacket &pkt, const int64_t &frame_cnt, |
| | | bool FormatOut::writeFrame(AVPacket *pkt, const int64_t &frame_cnt, |
| | | bool interleaved/* = true*/){ |
| | | |
| | | adjustPTS(pkt, frame_cnt); |
| | | auto ret = writeFrame2(pkt, interleaved); |
| | | if (!ret){ |
| | | logIt("write to file failed, pkt.pts: %lld, dts: %lld, frame count: %d", |
| | | pkt.pts, pkt.dts, frame_cnt); |
| | | pkt->pts, pkt->dts, frame_cnt); |
| | | } |
| | | return ret; |
| | | } |
| | | |
| | | bool FormatOut::writeFrame2(AVPacket &pkt, bool interleaved){ |
| | | bool FormatOut::writeFrame2(AVPacket *pkt, bool interleaved){ |
| | | |
| | | int ret = 0; |
| | | if(interleaved){ |
| | | ret = av_interleaved_write_frame(ctx_, &pkt); |
| | | ret = av_interleaved_write_frame(ctx_, pkt); |
| | | }else{ |
| | | // returns 1 if flushed and there is no more data to flush |
| | | ret = av_write_frame(ctx_, &pkt); |
| | | ret = av_write_frame(ctx_, pkt); |
| | | } |
| | | |
| | | if(ret < -22 || ret == 0){ |
| | |
| | | |
| | | namespace ffwrapper{ |
| | | class VideoProp; |
| | | class CodedData; |
| | | class FrameData; |
| | | |
| | | class FormatOut |
| | | { |
| | |
| | | bool open(const char *filename, const char *format_name); |
| | | bool openCodec(VideoProp &prop); |
| | | |
| | | int encode(AVPacket &pkt, AVFrame *frame); |
| | | int encode(std::shared_ptr<CodedData> &data, |
| | | std::shared_ptr<FrameData> &frame_data); |
| | | int encode(std::shared_ptr<CodedData> &data,AVFrame *frame); |
| | | int encode(AVPacket *pkt, AVFrame *frame); |
| | | |
| | | public: |
| | | bool copyCodecFromIn(std::vector<AVStream*> in); |
| | | bool copyCodecFromIn(AVFormatContext* in); |
| | | bool openResource(const char *filename, const int flags); |
| | | bool closeResource(); |
| | | |
| | | bool JustWriter(std::vector<AVStream*> in, const char *filename); |
| | | bool JustWriter(AVFormatContext* in, const char *filename); |
| | | bool EncodeWriter(const char *filename); |
| | | bool writeFrame(AVPacket &pkt, const int64_t &frame_cnt, bool interleaved = true); |
| | | void adjustPTS(AVPacket &pkt, const int64_t &frame_cnt); |
| | | void adjustVideoPTS(AVPacket &pkt, const int64_t &frame_cnt); |
| | | bool writeFrame(AVPacket *pkt, const int64_t &frame_cnt, bool interleaved = true); |
| | | void adjustPTS(AVPacket *pkt, const int64_t &frame_cnt); |
| | | bool endWriter(); |
| | | |
| | | bool writeHeader(AVDictionary **options = NULL); |
| | | bool writeFrame2(AVPacket &pkt, bool interleaved); |
| | | bool writeFrame2(AVPacket *pkt, bool interleaved); |
| | | bool writeTrailer(); |
| | | public: |
| | | AVStream *getStream(){return v_s_;} |
| | | AVStream *getStream(); |
| | | const AVCodecContext *getCodecContext() const; |
| | | |
| | | const double getFPS()const{return fps_;} |
| | |
| | | void configEncoder(VideoProp &prop); |
| | | private: |
| | | AVFormatContext *ctx_; |
| | | AVStream *v_s_; |
| | | int v_idx_; |
| | | int a_idx_; |
| | | AVCodecContext *enc_ctx_; |
| | | |
| | | int64_t sync_opts_; |
| | |
| | | std::string format_name_; |
| | | |
| | | // rec |
| | | std::vector<AVStream*> streams_; |
| | | AVFormatContext *in_ctx_; |
| | | }; |
| | | } |
| | | #endif |
| | |
| | | #include <libavformat/avformat.h> |
| | | #include <libavutil/opt.h> |
| | | #include <libswscale/swscale.h> |
| | | #include <libavcodec/avcodec.h> |
| | | } |
| | | |
| | | using namespace ffwrapper; |
| | |
| | | ,conv_h_(h) |
| | | ,conv_flag_(f) |
| | | ,decRef_(dec) |
| | | ,thread_(nullptr) |
| | | ,stop_{false} |
| | | {} |
| | | |
| | | decoder::~decoder(){ |
| | | if (thread_){ |
| | | stop_.store(true); |
| | | thread_->join(); |
| | | } |
| | | |
| | | if (conv_){ |
| | | delete conv_; |
| | | } |
| | | |
| | | |
| | | { |
| | | std::lock_guard<std::mutex> l(mutex_pkt_); |
| | | list_pkt_.clear(); |
| | | } |
| | | |
| | | { |
| | | std::lock_guard<std::mutex> l(mutex_pic_); |
| | | for(auto &i : list_pic_){ |
| | | free(i.data); |
| | | } |
| | | list_pic_.clear(); |
| | | } |
| | | } |
| | | |
| | | int decoder::initDecoder(){ |
| | |
| | | if(decRef_->getCodecContext() == NULL){ |
| | | |
| | | bool flag = true; |
| | | flag = decRef_->openCodec(AVMEDIA_TYPE_VIDEO, NULL); |
| | | flag = decRef_->openCodec(NULL); |
| | | auto dec_ctx = decRef_->getCodecContext(); |
| | | if(conv_){ |
| | | delete conv_; |
| | |
| | | return 0; |
| | | } |
| | | |
| | | int decoder::SetFrame(std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){ |
| | | if (!data) return -1; |
| | | if (decRef_->isAudioPkt(data->getAVPacket())) return -2; |
| | | |
| | | if (!conv_){ |
| | | initDecoder(); |
| | | } |
| | | auto frame(std::make_shared<FrameData>()); |
| | | auto ret = decRef_->decode(frame, data); |
| | | if(ret == 1){ |
| | | int decoder::saveFrame(AVFrame *frame, int64_t &id){ |
| | | // cache the converted picture
| | | BGR24 pic; |
| | | AVFrame *frm = frame->getAVFrame(); |
| | | AVFrame *frm = frame; |
| | | pic.w = conv_w_; |
| | | pic.h = conv_h_; |
| | | unsigned char *picData = (unsigned char*)malloc(pic.w * pic.h * 3); |
| | | conv_->copyPicture(picData, frm); |
| | | pic.data = picData; |
| | | pic.id = id; |
| | | |
| | | std::lock_guard<std::mutex> l(mutex_pic_); |
| | | while(list_pic_.size() > 10){ |
| | | for(int i = 0; i < 5; i++){ |
| | | while(list_pic_.size() > 50){ |
| | | for(int i = 0; i < 12; i++){ |
| | | auto t = list_pic_.front(); |
| | | free(t.data); |
| | | list_pic_.pop_front(); |
| | | } |
| | | } |
| | | list_pic_.emplace_back(pic); |
| | | |
| | | } |
| | | return list_pic_.size(); |
| | | } |
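| | | // Editor's note (an assumption about the ownership contract): list_pic_ stores
| | | // raw malloc'ed BGR24 buffers, so the caller that drains it through GetFrame()
| | | // owns the returned buffer and must free() it, mirroring the free(t.data) above:
| | | //
| | | //   unsigned char *bgr = NULL; int w = 0, h = 0; int64_t id = 0;
| | | //   dec->GetFrame(&bgr, &w, &h, &id);
| | | //   if (bgr) { /* w * h * 3 bytes of BGR data */ free(bgr); }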
| | | |
| | | void decoder::Start(){ |
| | | if (thread_) return; |
| | | thread_.reset(new std::thread([&]{ |
| | | if (initDecoder() != 0) { |
| | | return; |
| | | } |
| | | |
| | | while(!stop_.load()){ |
| | | |
| | | std::unique_lock<std::mutex> locker(mutex_pkt_); |
| | | cv_.wait(locker, [&]{ |
| | | return !list_pkt_.empty() || stop_.load(); |
| | | }); |
| | | if (stop_.load()){ |
| | | break; |
| | | } |
| | | |
| | | auto pkt = list_pkt_.front(); |
| | | list_pkt_.pop_front(); |
| | | |
| | | AVFrame *frame = av_frame_alloc(); |
| | | AVPacket np(pkt.data->getAVPacket()); |
| | | av_copy_packet(&np, &pkt.data->getAVPacket()); |
| | | |
| | | auto ret = decRef_->decode(frame, &np); |
| | | av_packet_unref(&np); |
| | | |
| | | if (ret == 0){ |
| | | saveFrame(frame, pkt.id); |
| | | } |
| | | av_frame_free(&frame); |
| | | } |
| | | |
| | | })); |
| | | } |
| | | |
| | | int decoder::SetFrame(std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){ |
| | | |
| | | if (!data) return -1; |
| | | if (decRef_->isAudioPkt(&data->getAVPacket())) return -2; |
| | | |
| | | // if (!thread_){ |
| | | // if (initDecoder() != 0) return -3; |
| | | // Start(); |
| | | // } |
| | | |
| | | // std::lock_guard<std::mutex> l(mutex_pkt_); |
| | | // list_pkt_.push_back({data, id}); |
| | | // cv_.notify_one(); |
| | | // return list_pkt_.size(); |
| | | |
| | | if (!conv_){ |
| | | if (initDecoder() != 0) return -3; |
| | | } |
| | | |
| | | AVFrame *frame = av_frame_alloc(); |
| | | AVPacket np(data->getAVPacket()); |
| | | av_copy_packet(&np, &data->getAVPacket()); |
| | | auto ret = decRef_->decode(frame, &np); |
| | | av_packet_unref(&np); |
| | | |
| | | if (ret == 0){ |
| | | saveFrame(frame, id); |
| | | } |
| | | av_frame_free(&frame); |
| | | |
| | | } |
| | | |
| | | void decoder::GetFrame(unsigned char **data, int *w, int *h, int64_t *id){ |
| | | std::lock_guard<std::mutex> l(mutex_pic_); |
| | | if(list_pic_.empty()){ |
| | |
| | | #include <memory> |
| | | #include <list> |
| | | #include <mutex> |
| | | #include <thread> |
| | | #include <atomic> |
| | | #include <condition_variable> |
| | | |
| | | #include "../common.hpp" |
| | | |
| | | struct AVFrame; |
| | | |
| | | namespace ffwrapper |
| | | { |
| | | class FormatIn; |
| | | class cvbridge; |
| | | class CodedData; |
| | | |
| | | } // namespace ffwrapper |
| | | |
| | | namespace cffmpeg_wrap |
| | |
| | | std::list<BGR24> list_pic_; |
| | | std::mutex mutex_pic_; |
| | | |
| | | std::unique_ptr<std::thread> thread_; |
| | | std::atomic_bool stop_; |
| | | |
| | | std::list<CPacket> list_pkt_; |
| | | std::mutex mutex_pkt_; |
| | | std::condition_variable cv_; |
| | | private: |
| | | int initDecoder(); |
| | | int saveFrame(AVFrame *frame, int64_t &id); |
| | | public: |
| | | void Start(); |
| | | int SetFrame(std::shared_ptr<ffwrapper::CodedData> data, int64_t &id); |
| | | void GetFrame(unsigned char **data, int *w, int *h, int64_t *id); |
| | | public: |
| | |
| | | #include <unistd.h> |
| | | #include <sys/time.h> |
| | | |
| | | extern "C"{ |
| | | #include <libavcodec/avcodec.h> |
| | | } |
| | | |
| | | #include "../ffmpeg/format/FormatIn.hpp" |
| | | #include "../ffmpeg/data/CodedData.hpp" |
| | | #include "../ffmpeg/log/log.hpp" |
| | |
| | | std::lock_guard<std::mutex> l(mtx_pkt_); |
| | | // wait for an I-frame (keyframe)
| | | if (list_pkt_.empty()) { |
| | | AVPacket &avpkt = data->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | |
| | | if (!(data->getAVPacket().flags & AV_PKT_FLAG_KEY)){ |
| | | return; |
| | | } |
| | | } |
| | | |
| | | list_pkt_.push_back({data, id}); |
| | | |
| | | // the cache exceeded its maximum length: drop one GOP
| | | shrinkCache(); |
| | | } |
| | |
| | | int rec::shrinkCache(){ |
| | | // drop GOPs once the cache exceeds its maximum size,
| | | // keeping at least the minimum length needed for recording
| | | while (list_pkt_.size() > minduration_/2) { |
| | | int md = minduration_ < 201 ? 200 : minduration_; |
| | | while (list_pkt_.size() > md/2) { |
| | | list_pkt_.pop_front(); |
| | | while(!list_pkt_.empty()){ |
| | | auto &cache = list_pkt_.front(); |
| | | AVPacket &avpkt = cache.data->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | auto &i = list_pkt_.front(); |
| | | if (!(i.data->getAVPacket().flags & AV_PKT_FLAG_KEY)){ |
| | | list_pkt_.pop_front(); |
| | | }else{ |
| | | break; |
| | |
| | | #define _cffmpeg_rec_hpp_ |
| | | |
| | | #include <string> |
| | | #include <memory> |
| | | #include <unordered_map> |
| | | #include <list> |
| | | #include <mutex> |
| | | |
| | | #include "../buz/recorder.hpp" |
| | | |
| | | struct AVPacket; |
| | | |
| | | namespace ffwrapper |
| | | { |
| | |
| | | std::mutex mtx_recInfo_; |
| | | |
| | | // Cached video packets, waiting for firerecsignal to trigger recording
| | | std::list<buz::CPacket> list_pkt_; |
| | | std::list<CPacket> list_pkt_; |
| | | // Multi-threaded: the reader (producer) thread pushes packets and the recording (consumer) thread pops them
| | | std::mutex mtx_pkt_; |
| | | |
| | |
| | | #include "stream.hpp" |
| | | |
| | | #include "../ffmpeg/data/CodedData.hpp" |
| | | extern "C"{ |
| | | #include <libavcodec/avcodec.h> |
| | | } |
| | | |
| | | #include "../ffmpeg/format/FormatIn.hpp" |
| | | #include "../ffmpeg/data/CodedData.hpp" |
| | | |
| | | namespace cffmpeg_wrap{ |
| | | stream::stream(ffwrapper::FormatIn *in, const int maxSize) |
| | |
| | | |
| | | stream::~stream(){ |
| | | std::lock_guard<std::mutex> locker(mutex_avpkt_); |
| | | list_avpkt_.clear(); |
| | | list_pkt_.clear(); |
| | | } |
| | | |
| | | int stream::SetPacket(std::shared_ptr<ffwrapper::CodedData> data){ |
| | | int stream::SetPacket(std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){ |
| | | if (data){ |
| | | |
| | | auto audio = streamRef_->isAudioPkt(data->getAVPacket()); |
| | | // if the packet is audio but audio is not used, return immediately
| | | if (!audio_ && audio){ |
| | | if (!audio_ && streamRef_->isAudioPkt(&data->getAVPacket())){ |
| | | return 0; |
| | | } |
| | | |
| | | std::lock_guard<std::mutex> locker(mutex_avpkt_); |
| | | list_avpkt_.push_back(data); |
| | | list_pkt_.push_back({data, id}); |
| | | |
| | | while(list_avpkt_.size() > max_size_){ |
| | | list_avpkt_.pop_front(); |
| | | while(!list_avpkt_.empty()){ |
| | | auto &cache = list_avpkt_.front(); |
| | | AVPacket &avpkt = cache->getAVPacket(); |
| | | if (!(avpkt.flags & AV_PKT_FLAG_KEY)){ |
| | | list_avpkt_.pop_front(); |
| | | }else{ |
| | | break; |
| | | while(list_pkt_.size() > max_size_/2*3){ |
| | | list_pkt_.pop_front(); |
| | | } |
| | | } |
| | | } |
| | | return list_avpkt_.size(); |
| | | |
| | | return list_pkt_.size(); |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | void stream::GetPacket(unsigned char **pktData, int *size, int *key){ |
| | | std::lock_guard<std::mutex> l(mutex_avpkt_); |
| | | if(list_avpkt_.empty()){ |
| | | if(list_pkt_.empty()){ |
| | | return; |
| | | } |
| | | auto data = list_avpkt_.front(); |
| | | auto pkt = data->getAVPacket(); |
| | | |
| | | auto data = list_pkt_.front(); |
| | | list_pkt_.pop_front(); |
| | | |
| | | auto pkt = data.data->getAVPacket(); |
| | | *key = pkt.flags & AV_PKT_FLAG_KEY; |
| | | *size = pkt.size; |
| | | *pktData = (unsigned char *)malloc(*size); |
| | | memcpy(*pktData, pkt.data, pkt.size); |
| | | |
| | | list_avpkt_.pop_front(); |
| | | } |
| | | } |
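| | | // Editor's sketch (an assumption about ownership): GetPacket() hands back a
| | | // malloc'ed copy of the packet payload, so the caller is expected to free it:
| | | //
| | | //   unsigned char *buf = NULL; int size = 0, key = 0;
| | | //   s->GetPacket(&buf, &size, &key);
| | | //   if (buf) { /* size bytes; key != 0 marks a keyframe */ free(buf); }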
| | |
| | | #include <mutex> |
| | | #include <memory> |
| | | |
| | | #include "../common.hpp" |
| | | |
| | | namespace ffwrapper{ |
| | | class FormatIn; |
| | | class CodedData; |
| | | } |
| | | |
| | | namespace cffmpeg_wrap{ |
| | | class stream |
| | | { |
| | | private: |
| | | std::list<std::shared_ptr<ffwrapper::CodedData> > list_avpkt_; |
| | | std::list<CPacket> list_pkt_; |
| | | std::mutex mutex_avpkt_; |
| | | ffwrapper::FormatIn *streamRef_; |
| | | const int max_size_; |
| | |
| | | stream(ffwrapper::FormatIn *in, const int maxSize); |
| | | ~stream(); |
| | | |
| | | int SetPacket(std::shared_ptr<ffwrapper::CodedData> data); |
| | | int SetPacket(std::shared_ptr<ffwrapper::CodedData> data, int64_t &id); |
| | | void GetPacket(unsigned char **pktData, int *size, int *key); |
| | | void AudioSwitch(const bool a){audio_ = a;} |
| | | }; |
| | |
| | | #include "ffmpeg/configure/conf.hpp" |
| | | #include "ffmpeg/format/FormatIn.hpp" |
| | | #include "ffmpeg/format/FormatOut.hpp" |
| | | #include "ffmpeg/property/VideoProp.hpp" |
| | | #include "ffmpeg/data/CodedData.hpp" |
| | | #include "ffmpeg/data/FrameData.hpp" |
| | | #include "ffmpeg/property/VideoProp.hpp" |
| | | #include "ffmpeg/log/log.hpp" |
| | | #include "ffmpeg/bridge/cvbridge.hpp" |
| | | |
| | |
| | | |
| | | void Wrapper::init_worker(ffwrapper::FormatIn *in){ |
| | | if (rec_->Loaded() && stream_ && decoder_) return; |
| | | |
| | | stream_ = new stream(in, 3 * 25); |
| | | stream_->AudioSwitch(audio_); |
| | | |
| | |
| | | |
| | | void Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){ |
| | | |
| | | if (stream_) stream_->SetPacket(data); |
| | | if (decoder_) decoder_->SetFrame(data, id); |
| | | if (stream_) stream_->SetPacket(data, id); |
| | | if (rec_->Loaded()) rec_->SetPacket(data, id); |
| | | if (decoder_) decoder_->SetFrame(data, id); |
| | | } |
| | | |
| | | void Wrapper::deinit_worker(){ |
| | |
| | | continue; |
| | | } |
| | | |
| | | int wTime = 1000000.0 / in->getFPS() ; |
| | | wTime >>= 1; |
| | | logIt("INPUT FPS: %d", wTime); |
| | | |
| | | init_worker(in.get()); |
| | | |
| | | int64_t id = 0; |
| | | while(!stop_stream_.load()){ |
| | | auto data(std::make_shared<CodedData>()); |
| | | if(!in->readPacket(data)){ |
| | | logIt("read packet error"); |
| | | if (in->readPacket(&data->getAVPacket()) != 0){ |
| | | logIt("read packet error, id: %lld", id); |
| | | break; |
| | | } |
| | | |
| | | run_worker(in.get(), data, id); |
| | | usleep(wTime); |
| | | |
| | | id++; |
| | | } |
| | | |
| | |
| | | logIt("yolo can't find video stream\n"); |
| | | return NULL; |
| | | } |
| | | auto flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL); |
| | | auto flag = in->openCodec(NULL); |
| | | if(flag){ |
| | | auto dec_ctx = in->getCodecContext(); |
| | | |
| | |
| | | return NULL; |
| | | } |
| | | |
| | | auto data(std::make_shared<CodedData>()); |
| | | if(!in->readPacket(data)){ |
| | | logIt("read packet error"); |
| | | return NULL; |
| | | } |
| | | auto frame(std::make_shared<FrameData>()); |
| | | auto ret = in->decode(frame, data); |
| | | if(ret == 1){ |
| | | AVFrame *frm = frame->getAVFrame(); |
| | | uint8_t *data = NULL; |
| | | AVPacket *pkt = av_packet_alloc(); |
| | | if(in->readPacket(pkt) == 0){ |
| | | AVFrame *frm = av_frame_alloc(); |
| | | if(in->decode(frm, pkt) == 0){ |
| | | *w = frm->width; |
| | | *h = frm->height; |
| | | unsigned char *data = (unsigned char*)malloc(frm->width * frm->height * 3); |
| | | data = (unsigned char*)malloc(frm->width * frm->height * 3); |
| | | bridge_->copyPicture(data, frm); |
| | | return data; |
| | | } |
| | | return NULL; |
| | | av_frame_free(&frm); |
| | | av_packet_free(&pkt); |
| | | } |
| | | return data; |
| | | } |
| | | /////// for encoder |
| | | typedef struct _PicEncoder{ |
| | |
| | | } |
| | | |
| | | AVFrame *frame = e->bridge->getAVFrame(in, w, h); |
| | | auto data(std::make_shared<CodedData>()); |
| | | AVPacket *pkt = av_packet_alloc(); |
| | | |
| | | const int flag = e->enc->encode(data, frame); |
| | | if(flag > 0){ |
| | | auto pkt = data->getAVPacket(); |
| | | auto flag = e->enc->encode(pkt, frame); |
| | | if(flag == 0){ |
| | | int extradata_size = ctx->extradata_size; |
| | | uint8_t *extra = ctx->extradata; |
| | | |
| | | *key = pkt.flags & AV_PKT_FLAG_KEY; |
| | | *key = pkt->flags & AV_PKT_FLAG_KEY; |
| | | if(!(*key)){ |
| | | extradata_size = 0; |
| | | } |
| | | *size = pkt.size + extradata_size; |
| | | *size = pkt->size + extradata_size; |
| | | *out = (unsigned char *)malloc(*size); |
| | | |
| | | memcpy(*out, extra, extradata_size); |
| | | memcpy(*out + extradata_size, pkt.data, pkt.size); |
| | | memcpy(*out + extradata_size, pkt->data, pkt->size); |
| | | |
| | | }else{ |
| | | logIt("encode error or need more packet\n"); |
| | | } |
| | | |
| | | av_packet_free(&pkt); |
| | | av_frame_free(&frame); |
| | | |
| | | return flag; |
| | |
| | | #ifndef _cffmpeg_wrapper_hpp_ |
| | | #define _cffmpeg_wrapper_hpp_ |
| | | |
| | | extern "C"{ |
| | | #include <libavcodec/avcodec.h> |
| | | } |
| | | |
| | | #include <stdint.h> |
| | | |
| | | #include <string> |
| | |
| | | #include "common/callback.hpp" |
| | | |
| | | |
| | | |
| | | namespace ffwrapper{ |
| | | class FormatIn; |
| | | |
| | | class VideoProp; |
| | | class CodedData; |
| | | class VideoProp; |
| | | } |
| | | |
| | | namespace cffmpeg_wrap{ |