From 2ec7bb8c87f1598ba390f99d01fea059c02a966a Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Thu, 25 Jul 2019 17:41:51 +0800
Subject: [PATCH] update

---
 csrc/wrapper.cpp | 235 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 225 insertions(+), 10 deletions(-)

diff --git a/csrc/wrapper.cpp b/csrc/wrapper.cpp
index ead51a0..72fda1d 100644
--- a/csrc/wrapper.cpp
+++ b/csrc/wrapper.cpp
@@ -12,6 +12,7 @@
 #include "ffmpeg/configure/conf.hpp"
 #include "ffmpeg/format/FormatIn.hpp"
+#include "ffmpeg/format/FormatOut.hpp"
 #include "ffmpeg/property/VideoProp.hpp"
 #include "ffmpeg/data/CodedData.hpp"
 #include "ffmpeg/data/FrameData.hpp"
 
@@ -36,6 +37,9 @@
     ,scale_w_(0)
     ,scale_h_(0)
     ,scale_f_(SWS_POINT)
+    ,gb_(0)
+    ,cpu_(0)
+    ,encoder_(nullptr)
     {
         makeTheWorld();
     }
@@ -53,6 +57,9 @@
         if(recorder_){
             delete recorder_; recorder_ = NULL;
         }
+        if (encoder_){
+            delete encoder_; encoder_ = NULL;
+        }
     }
 
     void Wrapper::ScalePicture(const int w, const int h, const int flags){
@@ -61,15 +68,29 @@
         scale_h_ = h;
     }
 
+    void Wrapper::UseGB28181(){
+        gb_ = 1;
+    }
+
+    void Wrapper::UseCPU(){
+        cpu_ = 1;
+    }
+
     std::unique_ptr<ffwrapper::FormatIn> Wrapper::init_reader(const char* input){
 
         VideoProp prop;
         prop.url_ = input;
         prop.rtsp_tcp_ = true;
+        prop.gpu_acc_ = !cpu_;
 
         std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
         AVDictionary *avdic = prop.optsFormat();
-        int flag = in->open(input, &avdic);
+        int flag = -1;
+        if (gb_){
+            flag = in->openGb28181(input, NULL);
+        }else{
+            flag = in->open(input, &avdic);
+        }
         if(avdic){
             av_dict_free(&avdic);
         }
@@ -84,11 +105,13 @@
                 auto dec_ctx = in->getCodecContext();
                 if(bridge_)delete bridge_;
+                scale_w_ = scale_w_ == 0 || scale_w_ > dec_ctx->width ? dec_ctx->width : scale_w_;
+                scale_h_ = scale_h_ == 0 || scale_h_ > dec_ctx->height ? dec_ctx->height : scale_h_;
+
                 AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
                 bridge_ = new cvbridge(
                     dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-                    scale_w_?scale_w_:dec_ctx->width, scale_h_?scale_h_:dec_ctx->height, pix_fmt,
-                    scale_f_);
+                    scale_w_, scale_h_, pix_fmt, scale_f_);
             }
 
             if (!flag){
@@ -100,6 +123,45 @@
             return nullptr;
         }
+
+    // ffwrapper::FormatIn* Wrapper::init_reader_gb28181(const char* input){
+
+    //     VideoProp prop;
+    //     prop.url_ = input;
+    //     prop.rtsp_tcp_ = true;
+    //     prop.gpu_acc_ = !cpu_;
+    //     ffwrapper::FormatIn* in(new FormatIn(prop.gpuAccl()));
+    //     AVDictionary *avdic = prop.optsFormat();
+    //     int flag = in->openGb28181(input, &avdic);
+    //     if(avdic){
+    //         logIt("ERROR:in->openGb28181(input, &avdic):flag:%d\n",flag);
+    //         av_dict_free(&avdic);
+    //     }
+    //     if(flag == 0){
+    //         if(!in->findStreamInfo(NULL)){
+    //             logIt("yolo can't find video stream\n");
+    //             return nullptr;
+    //         }
+    //         bool flag = true;
+    //         if(map_workers_.find(WORKER_DECODER) != map_workers_.end()){
+    //             flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
+    //             auto dec_ctx = in->getCodecContext();
+    //             if(bridge_)delete bridge_;
+    //             scale_w_ = scale_w_ == 0 || scale_w_ > dec_ctx->width ? dec_ctx->width : scale_w_;
+    //             scale_h_ = scale_h_ == 0 || scale_h_ > dec_ctx->height ? dec_ctx->height : scale_h_;
+    //             AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
+    //             bridge_ = new cvbridge(
+    //                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+    //                 scale_w_, scale_h_, pix_fmt, scale_f_);
+    //         }
+    //         if (!flag){
+    //             logIt("FormatIn openCodec Failed!");
+    //             return nullptr;
+    //         }
+    //         return in;
+    //     }
+    //     return nullptr;
+    // }
 
     int Wrapper::init_recorder(FormatIn *in, std::string dir, const int mind, const int maxd){
         if(!in){
@@ -134,9 +196,12 @@
     }
 
     void Wrapper::run_stream_thread(){
+
         while(!stop_stream_.load()){
             auto in = init_reader(input_url_.c_str());
+            if (!in) {
+                logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
                 usleep(200000);
                 continue;
             }
@@ -153,15 +218,18 @@
                     pkt.id = id++;
                 }
                 pkt.data = data;
+                if(data != nullptr) {
+                    cacheAVPacket(data->getAVPacket());
+                }
                 run_worker(in.get(), pkt);
                 if(!data){
                     break;
                 }
                 //test
-                if(recorder_)
-                    if(id % 250 == 0)
-                        recorder_->FireRecorder(id);
+                // if(recorder_)
+                //     if(id % 250 == 0)
+                //         recorder_->FireRecorder(id);
 
             }
         }
     }
@@ -235,11 +303,12 @@
         pic_bgr24 pic;
         if(bridge_){
             AVFrame *frm = frame->getAVFrame();
-            unsigned char *data = (unsigned char*)malloc(frm->width * frm->height * 3);
+            pic.w = scale_w_;
+            pic.h = scale_h_;
+
+            unsigned char *data = (unsigned char*)malloc(pic.w * pic.h * 3);
             bridge_->copyPicture(data, frm);
             pic.data = data;
-            pic.w = scale_w_ ? scale_w_ : frm->width;
-            pic.h = scale_h_ ? scale_h_ : frm->height;
         }
         if(func_dec_){
             func_dec_(pic.data, pic.w, pic.h);
@@ -268,6 +337,30 @@
         auto p = list_pic_.front();
         *data = p.data; *w = p.w; *h = p.h;
         list_pic_.pop_front();
+    }
+
+    void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){
+        std::lock_guard<std::mutex> l(mutex_avpkt_);
+        if(list_avpkt_.empty()){
+            return;
+        }
+        auto pkt = list_avpkt_.front();
+        *key = pkt.flags & AV_PKT_FLAG_KEY;
+        *size = pkt.size;
+        *pktData = (unsigned char *)malloc(*size);
+        memcpy(*pktData, pkt.data, pkt.size);
+
+        list_avpkt_.pop_front();
+    }
+    void Wrapper::cacheAVPacket(const AVPacket &pkt){
+        std::lock_guard<std::mutex> l(mutex_avpkt_);  // guard list_avpkt_ with the same mutex as GetPacket
+        while(list_avpkt_.size() > 10){
+// printf("cacheAVPacket drop packets!!!!!!!!!!\n");
+            for(int i = 0; i < 5; i++){
+                list_avpkt_.pop_front();
+            }
+        }
+        list_avpkt_.emplace_back(pkt);
     }
 
     void Wrapper::run_worker(ffwrapper::FormatIn *in, avpacket &pkt){
@@ -349,4 +442,126 @@
         }
         return NULL;
     }
-}
\ No newline at end of file
+
+
+    void Wrapper::BuildEncoder(const char *file, const int w, const int h, const int fps, const int br, const int gi){
+        std::string dir(file);
+
+        map_workers_[WORKER_ENCODER] = [=](FormatIn *in){
+            return init_encoder(in, dir.c_str(), w, h, fps, br, gi);
+        };
+    }
+
+    int Wrapper::init_encoder(FormatIn *in, const char *file, const int w, const int h, const int fps, const int br, const int gi){
+        if(!in){
+            logIt("Init wrapper first");
+            return -1;
+        }
+        if(encoder_){
+            logIt("encoder exists");
+            delete encoder_;
+        }
+
+        VideoProp prop_;
+        AVCodecContext *ctx = in->getCodecContext();
+        if(w == 0 && h == 0){
+            prop_.width_ = ctx->width;
+            prop_.height_ = ctx->height;
+        }
+        prop_.sample_aspect_ratio_ = ctx->sample_aspect_ratio;
+        encoder_ = new FormatOut(prop_, file);
+
+        return 0;
+    }
+
+/////// for encoder
+    typedef struct _PicEncoder{
+        FormatOut *enc;
+        int w;
+        int h;
+        int fps;
+        int br;
+        int gi;
+        int flag;
+        cvbridge *bridge;
+    } PicEncoder;
+
+    void *CreateEncoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi){
+
+        PicEncoder *e = (PicEncoder*)malloc(sizeof(PicEncoder));
+        e->enc = NULL;
+        e->w = w;
+        e->h = h;
+        e->fps = fps;
+        e->br = br;
+        e->gi = gi;
+        e->flag = scale_flag;
+        e->bridge = NULL;
+
+        VideoProp prop_;
+        prop_.width_ = w;
+        prop_.height_ = h;
+        prop_.fps_ = fps;
+        prop_.bit_rate_ = br;
+        gi < 0 ? prop_.gpu_acc_ = false : prop_.gpu_acc_ = true;
+
+        FormatOut *enc = new FormatOut(prop_, "./88.mp4");
+        e->enc = enc;
+
+        return e;
+    }
+
+    void DestroyEncoder(void *h){
+        PicEncoder *e = (PicEncoder*)h;
+        if (e == NULL){
+            return;
+        }
+
+        delete e->bridge;
+        delete e->enc;
+
+        free(e);
+    }
+
+    int Encode(void *hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key){
+
+        PicEncoder *e = (PicEncoder*)hdl;
+        auto ctx = e->enc->getCodecContext();
+
+        if (e->bridge == NULL){
+            AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
+            e->bridge = new cvbridge(
+                w, h, AV_PIX_FMT_BGR24,
+                e->w, e->h, ctx->pix_fmt, e->flag);
+        }
+
+        AVFrame *frame = e->bridge->getAVFrame(in, w, h);
+        auto data(std::make_shared<CodedData>());
+
+        const int flag = e->enc->encode(data, frame);
+        if(flag > 0){
+            auto pkt = data->getAVPacket();
+            int extradata_size = ctx->extradata_size;
+            uint8_t *extra = ctx->extradata;
+
+            *key = pkt.flags & AV_PKT_FLAG_KEY;
+            if(!(*key)){
+                extradata_size = 0;
+            }
+            *size = pkt.size + extradata_size;
+            *out = (unsigned char *)malloc(*size);
+
+            memcpy(*out, extra, extradata_size);
+            memcpy(*out + extradata_size, pkt.data, pkt.size);
+
+        }else{
+            logIt("encode error or need more packet\n");
+        }
+
+        av_frame_free(&frame);
+
+        return flag;
+    }
+
+}
+
--
Gitblit v1.8.0
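
Usage note: besides the Wrapper member changes, this patch adds a C-style single-frame encoding API (CreateEncoder / Encode / DestroyEncoder) at the bottom of csrc/wrapper.cpp. The sketch below is one way a caller might drive it; it is an illustration only, and the symbol linkage, the example fps/bitrate values, the use of 0x10 (libswscale's SWS_POINT) as scale_flag, and the assumption that the caller frees the malloc'd packet buffer are not guaranteed by the patch itself.

// Hypothetical caller-side sketch (not part of the patch): feed one BGR24
// frame to the encoder added above and release the returned packet buffer.
#include <cstdint>
#include <cstdlib>
#include <vector>

// Signatures as added in csrc/wrapper.cpp; how they are exported is assumed.
void *CreateEncoder(const int w, const int h, const int fps, const int br,
                    const int scale_flag, const int gi);
int  Encode(void *hdl, uint8_t *in, const int w, const int h,
            uint8_t **out, int *size, int *key);
void DestroyEncoder(void *h);

int main(){
    const int w = 1280, h = 720;
    // gi < 0 selects the CPU path in CreateEncoder; 0x10 is libswscale's
    // SWS_POINT, used here as the scale_flag passed through to cvbridge.
    void *enc = CreateEncoder(w, h, 25 /*fps*/, 2000000 /*bitrate*/,
                              0x10 /*scale_flag*/, -1 /*gi*/);

    std::vector<uint8_t> bgr(static_cast<size_t>(w) * h * 3, 0); // one dummy BGR24 frame
    uint8_t *pkt = nullptr;
    int size = 0, key = 0;

    // Encode() returns > 0 once a packet is produced; the output buffer is
    // malloc'd inside Encode(), so the caller is assumed to free() it.
    if (Encode(enc, bgr.data(), w, h, &pkt, &size, &key) > 0){
        // ... write pkt[0..size) somewhere; key != 0 marks a keyframe ...
        free(pkt);
    }

    DestroyEncoder(enc);
    return 0;
}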