From cd3fe8cc1ae9028acb4f630ed16c12f4fb327f3c Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Thu, 10 Oct 2019 17:35:12 +0800
Subject: [PATCH] add interface out

---
 csrc/wrapper.cpp | 649 +++++++++++++++++++++++++++-------------------------
 1 files changed, 301 insertions(+), 348 deletions(-)

diff --git a/csrc/wrapper.cpp b/csrc/wrapper.cpp
index 0b7b6ca..7149cbf 100644
--- a/csrc/wrapper.cpp
+++ b/csrc/wrapper.cpp
@@ -13,67 +13,83 @@
 #include "ffmpeg/configure/conf.hpp"
 #include "ffmpeg/format/FormatIn.hpp"
 #include "ffmpeg/format/FormatOut.hpp"
-#include "ffmpeg/property/VideoProp.hpp"
 #include "ffmpeg/data/CodedData.hpp"
-#include "ffmpeg/data/FrameData.hpp"
+#include "ffmpeg/property/VideoProp.hpp"
 #include "ffmpeg/log/log.hpp"
 #include "ffmpeg/bridge/cvbridge.hpp"

 #include "buz/recorder.hpp"

-using namespace logif;
+#include "worker/stream.hpp"
+#include "worker/decoder.hpp"
+#include "worker/rec.hpp"
+#include "CUDALERP.h"
+
+using namespace logif;
 using namespace ffwrapper;
+
+#define DELETE_POINTER(p) \
do \
{ \
    if(NULL != p) \
        delete p; \
    p = NULL; \
}while(0)

 namespace cffmpeg_wrap{
    using namespace buz;

    Wrapper::Wrapper()
    :input_url_("")
-    ,recorder_(NULL)
-    ,thread_(nullptr)
-    ,stop_stream_(false)
-    ,bridge_(NULL)
-    ,scale_w_(0)
-    ,scale_h_(0)
-    ,scale_f_(SWS_POINT)
+    ,audio_(false)
    ,gb_(0)
    ,cpu_(0)
-    ,encoder_(nullptr)
+    ,run_dec_(false)
+    ,thread_(nullptr)
+    ,stop_stream_(false)
+    ,stream_(nullptr)
+    ,decoder_(nullptr)
+    ,rec_(new rec)
+    ,logit_(false)
    {
        makeTheWorld();
+    }
+
+    Wrapper::Wrapper(const char *logfile)
+    :input_url_("")
+    ,audio_(false)
+    ,gb_(0)
+    ,cpu_(0)
+    ,run_dec_(false)
+    ,thread_(nullptr)
+    ,stop_stream_(false)
+    ,stream_(nullptr)
+    ,decoder_(nullptr)
+    ,rec_(new rec)
+    ,logit_(true)
+    {
+        makeTheWorld();
+        logif::CreateLogger(logfile, true);
    }

    Wrapper::~Wrapper()
    {
-        if(thread_){
-            stop_stream_.store(true);
-            thread_->join();
+        try
+        {
+            if(thread_){
+                stop_stream_.store(true);
+                thread_->join();
+            }
+            DELETE_POINTER(rec_);
        }
-        if(bridge_){
-            delete bridge_; bridge_ = NULL;
+        catch(const std::exception& e)
+        {
+            logIt("WRAPPER EXCEPTION: ", e.what());
        }
-        if(recorder_){
-            delete recorder_; recorder_ = NULL;
-        }
-        if (encoder_){
-            delete encoder_; encoder_ = NULL;
-        }
-    }
-
-    void Wrapper::ScalePicture(const int w, const int h, const int flags){
-        scale_w_ = w;
-        scale_f_ = flags;
-        scale_h_ = h;
-    }
-
-    void Wrapper::UseGB28181(){
-        gb_ = 1;
-    }
-
-    void Wrapper::UseCPU(){
-        cpu_ = 1;
+        if (logit_)
+            logif::DestroyLogger();
    }

    std::unique_ptr<ffwrapper::FormatIn> Wrapper::init_reader(const char* input){
@@ -96,142 +112,14 @@
        }
        if(flag == 0){
            if(!in->findStreamInfo(NULL)){
-                logIt("yolo can't find video stream\n");
+                logIt("can't find video stream\n");
                return nullptr;
            }
-            bool flag = true;
-            if(map_workers_.find(WORKER_DECODER) != map_workers_.end()){
-                flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
-                auto dec_ctx = in->getCodecContext();
-                if(bridge_)delete bridge_;
-
-                scale_w_ = scale_w_ == 0 || scale_w_ > dec_ctx->width ? dec_ctx->width : scale_w_;
-                scale_h_ = scale_h_ == 0 || scale_h_ > dec_ctx->height ? dec_ctx->height : scale_h_;
-
-                AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
-                bridge_ = new cvbridge(
-                    dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-                    scale_w_, scale_h_, pix_fmt, scale_f_);
-
-            }
-            if (!flag){
-                logIt("FormatIn openCodec Failed!");
-                return nullptr;
-            }
+            return in;
        }
        return nullptr;
-    }
-
-    // ffwrapper::FormatIn* Wrapper::init_reader_gb28181(const char* input){
-    //     VideoProp prop;
-    //     prop.url_ = input;
-    //     prop.rtsp_tcp_ = true;
-    //     prop.gpu_acc_ = !cpu_;
-    //     ffwrapper::FormatIn* in(new FormatIn(prop.gpuAccl()));
-    //     AVDictionary *avdic = prop.optsFormat();
-    //     int flag = in->openGb28181(input, &avdic);
-    //     if(avdic){
-    //         logIt("ERROR:in->openGb28181(input, &avdic):flag:%d\n",flag);
-    //         av_dict_free(&avdic);
-    //     }
-    //     if(flag == 0){
-    //         if(!in->findStreamInfo(NULL)){
-    //             logIt("yolo can't find video stream\n");
-    //             return nullptr;
-    //         }
-    //         bool flag = true;
-    //         if(map_workers_.find(WORKER_DECODER) != map_workers_.end()){
-    //             flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
-    //             auto dec_ctx = in->getCodecContext();
-    //             if(bridge_)delete bridge_;
-    //             scale_w_ = scale_w_ == 0 || scale_w_ > dec_ctx->width ? dec_ctx->width : scale_w_;
-    //             scale_h_ = scale_h_ == 0 || scale_h_ > dec_ctx->height ? dec_ctx->height : scale_h_;
-    //             AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
-    //             bridge_ = new cvbridge(
-    //                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-    //                 scale_w_, scale_h_, pix_fmt, scale_f_);
-    //         }
-    //         if (!flag){
-    //             logIt("FormatIn openCodec Failed!");
-    //             return nullptr;
-    //         }
-    //         return in;
-    //     }
-    //     return nullptr;
-    // }
-
-    int Wrapper::init_recorder(FormatIn *in, std::string dir, const int mind, const int maxd){
-        if(!in){
-            logIt("Init wrapper first");
-            return -1;
-        }
-        if(recorder_){
-            logIt("recorder exist");
-            delete recorder_;
-        }
-
-        recorder_ = new Recorder(in);
-        recorder_->SetCallback([&](int &index, std::string &path){
-            cache_rec_info(index, path);
-        });
-
-        int trycnt = 0;
-        while(trycnt < 100){
-            const int ret = recorder_->Run(dir.c_str(), mind, maxd);
-            if(ret == 0) break;
-            usleep(200000);
-        }
-        return trycnt == 100;
-    }
-
-    void Wrapper::init_worker(ffwrapper::FormatIn *in){
-        for(auto &i : map_workers_){
-            if(i.second(in) != 0){
-                logIt("worker %d init error", i.first);
-            }
-        }
-    }
-
-    void Wrapper::run_stream_thread(){
-
-        while(!stop_stream_.load()){
-            auto in = init_reader(input_url_.c_str());
-
-            if (!in) {
-                logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
-                usleep(200000);
-                continue;
-            }
-            init_worker(in.get());
-
-            int64_t id = 0;
-            avpacket pkt;
-            while(!stop_stream_.load()){
-                auto data(std::make_shared<CodedData>());
-                if(!in->readPacket(data)){
-                    logIt("read packet error");
-                    pkt.id = -1; data = nullptr; id = 0;
-                }else{
-                    pkt.id = id++;
-                }
-                pkt.data = data;
-                if(data != nullptr) {
-                    cacheAVPacket(data->getAVPacket());
-                }
-
-                run_worker(in.get(), pkt);
-                if(!data){
-                    break;
-                }
-                //test
-                // if(recorder_)
-                //     if(id % 250 == 0)
-                //         recorder_->FireRecorder(id);
-            }
-        }
    }

    int Wrapper::RunStream(const char* input){
@@ -249,167 +137,166 @@
        return 0;
    }

-    //////////////recorder
-    void Wrapper::BuildRecorder(const char *output, const int mindur, const int maxdur){
-        std::string dir(output);
-        map_workers_[WORKER_RECORDER] = [=](FormatIn *in){
-            return init_recorder(in, dir, mindur, maxdur);
-        };
+    void Wrapper::AudioSwitch(const bool a){
+        audio_ = a;
+        // if (stream_){
+        //     stream_->AudioSwitch(a);
+        // }
    }

-    int Wrapper::FireRecorder(const int64_t &id){
-        if(recorder_){
-            recorder_->FireRecorder(id);
+    void Wrapper::init_worker(ffwrapper::FormatIn *in){
+        if (rec_->Loaded() && stream_ && decoder_) return;
+
+        stream_ = new stream(in, 3 * in->getFPS());
+        // stream_->AudioSwitch(audio_);
+
+        decoder_ = new decoder(in);
+
+        rec_->Load(in);
+        if(fn_rec_lazy_) {
+            fn_rec_lazy_();
+            fn_rec_lazy_ = nullptr;
        }
    }
+
+    int Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
+        if (gb_){
+            AVPacket &pkt = data->getAVPacket();
+            pkt.pts = pkt.dts = AV_NOPTS_VALUE;
+        }
+        int flag = 0;
+        if (stream_) stream_->SetPacket(data, id);
+        if (decoder_ && run_dec_) flag = decoder_->SetFrame(data, id);
+        if (rec_->Loaded()) rec_->SetPacket(data, id);

-    void Wrapper::cache_rec_info(int &index, std::string &path){
-        if(func_rec_){ //active api
-            func_rec_(path, index);
-        }else{ // passive api
-            std::lock_guard<std::mutex> l(mutex_rec_);
-            while(list_rec_.size() > 10){
-                for(int i = 0; i < 5; i++){
-                    list_rec_.pop_front();
-                }
+        return flag;
+    }
+
+    void Wrapper::deinit_worker(){
+        DELETE_POINTER(stream_);
+        DELETE_POINTER(decoder_);
+        rec_->Unload();
+    }
+
+    void Wrapper::run_stream_thread(){
+
+        while(!stop_stream_.load()){
+            auto in = init_reader(input_url_.c_str());
+
+            if (!in) {
+                logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
+                sleep(2);
+                continue;
            }
-            struct record_file_info info;
-            info.file_frame_index = index;
-            info.file_path = path;
-            list_rec_.emplace_back(info);
-            logIt("list rec files count : %d", list_rec_.size());
+
+            int wTime = 1000000.0 / in->getFPS();
+            wTime >>= 1;
+            logIt("WAIT TIME PER FRAME: %d", wTime);
+
+            init_worker(in.get());
+
+            int64_t id = gb_ ? 0 : -1;
+
+            while(!stop_stream_.load()){
+                auto data(std::make_shared<CodedData>());
+                if (in->readPacket(&data->getAVPacket()) != 0){
+                    logIt("read packet error, id: %lld", id);
+                    break;
+                }
+
+                if (in->notVideoAudio(&data->getAVPacket())){
+                    continue;
+                }
+
+                if (!gb_ && id < 0){
+                    id++;
+                    continue;
+                }
+                // decode error
+                if (run_worker(in.get(), data, id) == -1){
+                    break;
+                }
+                usleep(wTime);
+
+                id++;
+            }
+
+            deinit_worker();
        }
    }

-    void Wrapper::GetInfoRecorder(int &index, std::string &path){
-        std::lock_guard<std::mutex> l(mutex_rec_);
-        if(list_rec_.empty()){
-            index = -1;
-            path = "";
-            return;
+    void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur, const bool audio){
+        bool a = audio;
+        if (gb_) a = false;
+
+        if (rec_->Loaded()){
+            rec_->NewRec(id, output, mindur, maxdur, a);
+        }else{
+            std::string rid(id), dir(output);
+            fn_rec_lazy_ =
+                [=]{rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, a);};
        }
-        auto info = list_rec_.front();
-        index = info.file_frame_index;
-        path = info.file_path;
-        list_rec_.pop_front();
-        logIt("go get info index: %d, file: %s\n", index, path.c_str());
    }

+    int Wrapper::FireRecorder(const char* sid,const int64_t &id){
+        if (rec_->Loaded()){
+            rec_->FireRecSignal(sid, id);
+        }
+    }
+
+    void Wrapper::GetInfoRecorder(std::string &recID, int &index, std::string &path){
+        if (rec_){
+            rec_->GetRecInfo(recID, index, path);
+        }
+    }
    ////////decoder
    void Wrapper::BuildDecoder(){
-        map_workers_[WORKER_DECODER] = [&](FormatIn*){return 0;};
+        run_dec_ = true;
    }

-    void Wrapper::cache_pic(std::shared_ptr<ffwrapper::FrameData> &frame){
-
-        pic_bgr24 pic;
-        if(bridge_){
-            AVFrame *frm = frame->getAVFrame();
-            pic.w = scale_w_;
-            pic.h = scale_h_;
-
-            unsigned char *data = (unsigned char*)malloc(pic.w * pic.h * 3);
-            bridge_->copyPicture(data, frm);
-            pic.data = data;
+    void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id){
+        if (decoder_){
+            decoder_->GetFrame(data, w, h, format, length, id);
        }
-        if(func_dec_){
-            func_dec_(pic.data, pic.w, pic.h);
-        }else{
-            std::lock_guard<std::mutex> l(mutex_pic_);
-            while(list_pic_.size() > 10){
-                for(int i = 0; i < 5; i++){
-                    auto t = list_pic_.front();
-                    free(t.data);
-                    list_pic_.pop_front();
-                }
-            }
-            list_pic_.emplace_back(pic);
-        }
-    }
-
-    void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h){
-        std::lock_guard<std::mutex> l(mutex_pic_);
-        if(list_pic_.empty()){
-            *data = NULL;
-            *w = 0;
-            *h = 0;
-            return;
-        }
-        auto p = list_pic_.front();
-        *data = p.data; *w = p.w; *h = p.h;
-        list_pic_.pop_front();
-    }
-
    void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){
-        std::lock_guard<std::mutex> l(mutex_avpkt_);
-        if(list_avpkt_.empty()){
-            return;
-        }
-        auto pkt = list_avpkt_.front();
-        *key = pkt.flags & AV_PKT_FLAG_KEY;
-        *size = pkt.size;
-        *pktData = (unsigned char *)malloc(*size);
-        memcpy(*pktData, pkt.data, pkt.size);
-
-        list_avpkt_.pop_front();
-    }
-    void Wrapper::cacheAVPacket(const AVPacket &pkt){
-        std::lock_guard<std::mutex> l(mutex_pic_);
-        while(list_avpkt_.size() > 10){
-// printf("cacheAVPacket drop packets!!!!!!!!!!\n");
-            for(int i = 0; i < 5; i++){
-                list_avpkt_.pop_front();
-            }
-        }
-        list_avpkt_.emplace_back(pkt);
-    }
-
-    void Wrapper::run_worker(ffwrapper::FormatIn *in, avpacket &pkt){
-        if(!pkt.data) return;
-        if (map_workers_.find(WORKER_DECODER) != map_workers_.end()) {
-            auto frame(std::make_shared<FrameData>());
-            auto ret = in->decode(frame, pkt.data);
-            if(ret == 1){
-                // output data
-                cache_pic(frame);
-            }
-        }
-
-        if(recorder_){
-            recorder_->CachePacket(pkt);
+        if (stream_){
+            stream_->GetPacket(pktData, size, key);
        }
    }

-    ///// active api
-    void Wrapper::ActiveRecorder(const char *dir, const int mind, const int maxd,
-                                FUNC_REC func){
-        BuildRecorder(dir, mind, maxd);
-        func_rec_ = func;
-    }
+} // end class wrapper
+///////////////////////////////////////////////////////////
+///single decode or encoder
+////// decoder

-    void Wrapper::ActiveDecoder(FUNC_DEC fn){
-        BuildDecoder();
-        func_dec_ = fn;
-    }
+#include "ffmpeg/data/FrameData.hpp"

-    ////// test
-    uint8_t *Wrapper::decodeJPEG(const char *file, int *w, int *h){
+// return val: -1 open error; -2, find stream error; -3, converter create
+namespace cffmpeg_wrap{ // start test functions
+    uint8_t* Decode(const char *file, const int gb, int *w, int *h){
        VideoProp prop;
        prop.url_ = file;
        prop.gpu_acc_ = false;
        std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
-        int flag = in->open(file, NULL);
-
+        int flag = -1;
+        if (gb){
+            flag = in->openGb28181(file, NULL);
+        }else{
+            flag = in->open(file, NULL);
+        }
+
+        std::unique_ptr<cvbridge> bridge_(nullptr);
        if(flag == 0){
            if(!in->findStreamInfo(NULL)){
                logIt("yolo can't find video stream\n");
+                *w = *h = -2;
                return NULL;
            }
-            auto flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
+            auto flag = in->openCodec(NULL);
            if(flag){
                auto dec_ctx = in->getCodecContext();
@@ -420,62 +307,37 @@
            }else{
                logIt("FormatIn openCodec Failed!");
+                *w = *h = -3;
                return NULL;
            }
        }else{
-            logIt("open %s error", input_url_.c_str());
+            logIt("open %s error", file);
+            *w = *h = -1;
            return NULL;
        }
-        auto data(std::make_shared<CodedData>());
-        if(!in->readPacket(data)){
-            logIt("read packet error");
-            return NULL;
+        uint8_t *pic = NULL;
+        *w = *h = 0;
+
+        int tryTime = 0;
+        while (tryTime++ < 100){
+
+            auto data(std::make_shared<CodedData>());
+            if (in->readPacket(&data->getAVPacket()) == 0){
+
+                auto frame(std::make_shared<FrameData>());
+                AVFrame *frm = frame->getAVFrame();
+                if(in->decode(frm, &data->getAVPacket()) == 0){
+                    *w = frm->width;
+                    *h = frm->height;
+                    pic = bridge_->convert2Data(frm);
+                    break;
+                }
+            }
        }
-        auto frame(std::make_shared<FrameData>());
-        auto ret = in->decode(frame, data);
-        if(ret == 1){
-            AVFrame *frm = frame->getAVFrame();
-            *w = frm->width;
-            *h = frm->height;
-            unsigned char *data = (unsigned char*)malloc(frm->width * frm->height * 3);
-            bridge_->copyPicture(data, frm);
-            return data;
-        }
-        return NULL;
+
+        return pic;
    }
-
-
-    void Wrapper::BuildEncoder(const char *file, const int w, const int h, const int fps, const int br, const int gi){
-        std::string dir(file);
-
-        map_workers_[WORKER_ENCODER] = [=](FormatIn *in){
-            return init_encoder(in, dir.c_str(), w, h, fps, br, gi);
-        };
-    }
-
-    int Wrapper::init_encoder(FormatIn *in, const char *file, const int w, const int h, const int fps, const int br, const int gi){
-        if(!in){
-            logIt("Init wrapper first");
-            return -1;
-        }
-        if(encoder_){
-            logIt("recorder exist");
-            delete encoder_;
-        }
-
-        VideoProp prop_;
-        AVCodecContext *ctx = in->getCodecContext();
-        if(w == 0 && h == 0){
-            prop_.width_ = ctx->width;
-            prop_.height_ = ctx->height;
-        }
-        prop_.sample_aspect_ratio_ = ctx->sample_aspect_ratio;
-        encoder_ = new FormatOut(prop_, file);
-
-        return 0;
-    }
-
    /////// for encoder
    typedef struct _PicEncoder{
        FormatOut *enc;
@@ -530,40 +392,131 @@
        PicEncoder *e = (PicEncoder*)hdl;
        auto ctx = e->enc->getCodecContext();

+        AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
        if (e->bridge == NULL){
-            AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
            e->bridge = new cvbridge(
                    w, h, AV_PIX_FMT_BGR24,
                    e->w, e->h, ctx->pix_fmt, e->flag);
        }

-        AVFrame *frame = e->bridge->getAVFrame(in, w, h);
-        auto data(std::make_shared<CodedData>());
+        AVFrame *frame = cvbridge::fillFrame(in, w, h, pix_fmt);
+        AVPacket *pkt = av_packet_alloc();

-        const int flag = e->enc->encode(data, frame);
-        if(flag > 0){
-            auto pkt = data->getAVPacket();
+        auto flag = e->enc->encode(pkt, frame);
+        if(flag == 0){
            int extradata_size = ctx->extradata_size;
            uint8_t *extra = ctx->extradata;
-            *key = pkt.flags & AV_PKT_FLAG_KEY;
+            *key = pkt->flags & AV_PKT_FLAG_KEY;
            if(!(*key)){
                extradata_size = 0;
            }
-            *size = pkt.size + extradata_size;
+            *size = pkt->size + extradata_size;
            *out = (unsigned char *)malloc(*size);
            memcpy(*out, extra, extradata_size);
-            memcpy(*out + extradata_size, pkt.data, pkt.size);
+            memcpy(*out + extradata_size, pkt->data, pkt->size);
        }else{
            logIt("encode error or need more packet\n");
        }

+        av_packet_free(&pkt);
        av_frame_free(&frame);
        return flag;
    }
+///////////////////////////////////////////////////////////
+    typedef struct _conv
+    {
+        int srcW;
+        int srcH;
+        int srcF;
+        int dstW;
+        int dstH;
+        cvbridge *b;
+    }Conv;
+
+    void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
+                          const int dstW, const int dstH, const int dstFormat, const int flag){
+
+        auto bridge = new cvbridge(
+                srcW, srcH, srcFormat,
+                dstW, dstH, dstFormat, flag);
+        if (!bridge) return NULL;
+
+        Conv *c = (Conv*)malloc(sizeof(Conv));
+        c->b = bridge;
+        c->dstW = dstW;
+        c->dstH = dstH;
+        c->srcW = srcW;
+        c->srcH = srcH;
+        c->srcF = srcFormat;
+
+        return c;
+    }
+
+    uint8_t *Convert(void *h, uint8_t *src){
+        Conv *c = (Conv*)h;
+
+        auto b = c->b;
+
+        AVFrame *tmp_frm = cvbridge::fillFrame(src, c->srcW, c->srcH, c->srcF);
+        if (!tmp_frm) return NULL;
+
+        unsigned char *picData = b->convert2Data(tmp_frm);
+
+        av_frame_free(&tmp_frm);
+
+        return picData;
+    }
+
+    void DestoryConvertor(void *h){
+        Conv *c = (Conv*)h;
+        delete c->b;
+        free(c);
+    }
+
+
+    uint8_t* ConvertYUV2BGR(uint8_t *src, const int w, const int h, const int dst_w, const int dst_h, int *length){
+        return NULL;
+
+        // int oldw = w, oldh = h, neww = dst_w, newh = dst_h;
+        // // setting cache and shared modes
+        // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
+        // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
+
+        // // allocating and transferring image and binding to texture object
+        // cudaChannelFormatDesc chandesc_img = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
+        // cudaArray* d_img_arr;
+        // cudaMallocArray(&d_img_arr, &chandesc_img, oldw, oldh, cudaArrayTextureGather);
+        // cudaMemcpyToArray(d_img_arr, 0, 0, image, oldh * oldw, cudaMemcpyHostToDevice);
+        // struct cudaResourceDesc resdesc_img;
+        // memset(&resdesc_img, 0, sizeof(resdesc_img));
+        // resdesc_img.resType = cudaResourceTypeArray;
+        // resdesc_img.res.array.array = d_img_arr;
+        // struct cudaTextureDesc texdesc_img;
+        // memset(&texdesc_img, 0, sizeof(texdesc_img));
+        // texdesc_img.addressMode[0] = cudaAddressModeClamp;
+        // texdesc_img.addressMode[1] = cudaAddressModeClamp;
+        // texdesc_img.readMode = cudaReadModeNormalizedFloat;
+        // texdesc_img.filterMode = cudaFilterModePoint;
+        // texdesc_img.normalizedCoords = 0;
+        // cudaTextureObject_t d_img_tex = 0;
+        // cudaCreateTextureObject(&d_img_tex, &resdesc_img, &texdesc_img, nullptr);
+
+        // uint8_t* d_out = nullptr;
+        // cudaMalloc(&d_out, total);
+
+        // for (int i = 0; i < warmups; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+        // auto start = high_resolution_clock::now();
+        // for (int i = 0; i < runs; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+        // auto end = high_resolution_clock::now();
+        // auto sum = (end - start) / runs;
+
+        // auto h_out = new uint8_t[neww * newh];
+        // cudaMemcpy(h_out, d_out, total, cudaMemcpyDeviceToHost);
+    }
}
--
Gitblit v1.8.0
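
A minimal usage sketch of the Wrapper interface as it stands after this patch, assuming only the signatures visible in the diff (the logging constructor, BuildDecoder, BuildRecorder, RunStream, GetPicDecoder, GetInfoRecorder). The header name "wrapper.h" and the free()-based ownership of the returned picture buffer are assumptions, not confirmed by the patch.

#include <cstdlib>
#include <string>
#include "wrapper.h" // assumed public header exposing cffmpeg_wrap::Wrapper

int main(){
    // Logging constructor added by this patch.
    cffmpeg_wrap::Wrapper w("/tmp/wrapper.log");

    w.BuildDecoder();                                        // sets run_dec_ so decoder_ produces frames
    w.BuildRecorder("rec-1", "/tmp/records", 5, 30, false);  // queued via fn_rec_lazy_ until the stream is up
    w.RunStream("rtsp://127.0.0.1/stream1");                 // presumably starts run_stream_thread

    unsigned char *pic = NULL;
    int pw = 0, ph = 0, fmt = 0, len = 0;
    int64_t fid = 0;
    w.GetPicDecoder(&pic, &pw, &ph, &fmt, &len, &fid);       // polls decoder_->GetFrame(...)
    if (pic) free(pic);                                      // assumption: caller releases the buffer

    std::string recID, path;
    int index = 0;
    w.GetInfoRecorder(recID, index, path);                   // finished record files, if any
    return 0;
}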