From 68a19a73681301c6712e10d55bc64324716dbd24 Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Wed, 09 Oct 2019 15:38:47 +0800
Subject: [PATCH] split scale

---
 csrc/wrapper.cpp | 600 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 282 insertions(+), 318 deletions(-)

diff --git a/csrc/wrapper.cpp b/csrc/wrapper.cpp
index 3bf1b57..2643ce0 100644
--- a/csrc/wrapper.cpp
+++ b/csrc/wrapper.cpp
@@ -13,36 +13,64 @@
 #include "ffmpeg/configure/conf.hpp"
 #include "ffmpeg/format/FormatIn.hpp"
 #include "ffmpeg/format/FormatOut.hpp"
-#include "ffmpeg/property/VideoProp.hpp"
 #include "ffmpeg/data/CodedData.hpp"
-#include "ffmpeg/data/FrameData.hpp"
+#include "ffmpeg/property/VideoProp.hpp"
 #include "ffmpeg/log/log.hpp"
 #include "ffmpeg/bridge/cvbridge.hpp"

 #include "buz/recorder.hpp"

-using namespace logif;
+#include "worker/stream.hpp"
+#include "worker/decoder.hpp"
+#include "worker/rec.hpp"
+
+#include "CUDALERP.h"
+
+using namespace logif;
 using namespace ffwrapper;
+
+#define DELETE_POINTER(p) \
+do \
+{ \
+if(NULL != p) \
+delete p; \
+p = NULL; \
+}while(0)

 namespace cffmpeg_wrap{
     using namespace buz;

     Wrapper::Wrapper()
     :input_url_("")
-    ,thread_(nullptr)
-    ,stop_stream_(false)
-    ,bridge_(NULL)
-    ,scale_w_(0)
-    ,scale_h_(0)
-    ,scale_f_(SWS_POINT)
+    ,audio_(false)
     ,gb_(0)
     ,cpu_(0)
-    ,use_decoder_(false)
-    ,minduration(250)
-    ,maxduration(750)
+    ,run_dec_(false)
+    ,thread_(nullptr)
+    ,stop_stream_(false)
+    ,stream_(nullptr)
+    ,decoder_(nullptr)
+    ,rec_(new rec)
+    ,logit_(false)
     {
         makeTheWorld();
+    }
+
+    Wrapper::Wrapper(const char *logfile)
+    :input_url_("")
+    ,audio_(false)
+    ,gb_(0)
+    ,cpu_(0)
+    ,run_dec_(false)
+    ,thread_(nullptr)
+    ,stop_stream_(false)
+    ,stream_(nullptr)
+    ,decoder_(nullptr)
+    ,rec_(new rec)
+    ,logit_(true)
+    {
+        makeTheWorld();
+        logif::CreateLogger(logfile, true);
     }

@@ -54,37 +82,14 @@
             stop_stream_.store(true);
             thread_->join();
         }
-        if(bridge_){
-            delete bridge_; bridge_ = NULL;
-        }
-
-        map_rec_.clear();
-        list_rec_pkt_.clear();
-
-        for(auto &i : list_pic_){
-            free(i.data);
-        }
+        DELETE_POINTER(rec_);
     }
     catch(const std::exception& e)
     {
         logIt("WRAPPER EXCEPTION: ", e.what());
     }
-
-
-    }
-
-    void Wrapper::ScalePicture(const int w, const int h, const int flags){
-        scale_w_ = w;
-        scale_f_ = flags;
-        scale_h_ = h;
-    }
-
-    void Wrapper::UseGB28181(){
-        gb_ = 1;
-    }
-
-    void Wrapper::UseCPU(){
-        cpu_ = 1;
+        if (logit_)
+            logif::DestroyLogger();
     }

     std::unique_ptr<ffwrapper::FormatIn> Wrapper::init_reader(const char* input){
@@ -107,7 +112,7 @@
         }
         if(flag == 0){
             if(!in->findStreamInfo(NULL)){
-                logIt("yolo can't find video stream\n");
+                logIt("can't find video stream\n");
                 return nullptr;
             }

@@ -132,312 +137,161 @@
         return 0;
     }

+    void Wrapper::AudioSwitch(const bool a){
+        audio_ = a;
+        // if (stream_){
+        //     stream_->AudioSwitch(a);
+        // }
+    }
+
+    void Wrapper::init_worker(ffwrapper::FormatIn *in){
+        if (rec_->Loaded() && stream_ && decoder_) return;
+
+        stream_ = new stream(in, 3 * in->getFPS());
+        // stream_->AudioSwitch(audio_);
+
+        decoder_ = new decoder(in);
+
+        rec_->Load(in);
+        if(fn_rec_lazy_) {
+            fn_rec_lazy_();
+            fn_rec_lazy_ = nullptr;
+        }
+    }
+
+    void Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
+        if (gb_){
+            AVPacket &pkt = data->getAVPacket();
+            pkt.pts = pkt.dts = AV_NOPTS_VALUE;
+        }
+        if (stream_) stream_->SetPacket(data, id);
+        if (decoder_ && run_dec_) decoder_->SetFrame(data, id);
+        if (rec_->Loaded()) rec_->SetPacket(data, id);
+    }
+
+    void Wrapper::deinit_worker(){
+        DELETE_POINTER(stream_);
+        DELETE_POINTER(decoder_);
+        rec_->Unload();
+    }
+
     void Wrapper::run_stream_thread(){
         while(!stop_stream_.load()){
             auto in = init_reader(input_url_.c_str());
-
+
             if (!in) {
                 logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
-                usleep(200000);
+                sleep(2);
                 continue;
             }
+
+            int wTime = 1000000.0 / in->getFPS() ;
+            wTime >>= 1;
+            logIt("WAIT TIME PER FRAME: %d", wTime);

-            int64_t id = 0;
-            avpacket pkt;
+            init_worker(in.get());
+
+            int64_t id = gb_ ? 0 : -1;
+
             while(!stop_stream_.load()){
                 auto data(std::make_shared<CodedData>());
-                if(!in->readPacket(data)){
-                    logIt("read packet error");
-                    pkt.id = -1; data = nullptr; id = 0;
-                }else{
-                    pkt.id = id++;
-                }
-                pkt.data = data;
-                if(data != nullptr) {
-                    cacheAVPacket(data->getAVPacket());
-                }
-
-                run_worker(in.get(), pkt);
-                if(!data){
-                    map_rec_.clear();
-                    std::lock_guard<std::mutex> locker(mtx_rec_pkt_);
-                    list_rec_pkt_.clear();
-
+                if (in->readPacket(&data->getAVPacket()) != 0){
+                    logIt("read packet error, id: %lld", id);
                     break;
                 }
-                //test
-                // if(recorder_)
-                //     if(id % 250 == 0)
-                //         recorder_->FireRecorder(id);
-            }
-        }
-    }

-    void Wrapper::run_worker(ffwrapper::FormatIn *in, avpacket &pkt){
-        if(!pkt.data) return;
-        if (use_decoder_) {
-            if(in->getCodecContext() == NULL){
+                if (in->notVideoAudio(&data->getAVPacket())){
+                    continue;
+                }

-                bool flag = true;
-                flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
-                auto dec_ctx = in->getCodecContext();
-                if(bridge_)delete bridge_;
-
-                scale_w_ = scale_w_ == 0 || scale_w_ > dec_ctx->width ? dec_ctx->width : scale_w_;
-                scale_h_ = scale_h_ == 0 || scale_h_ > dec_ctx->height ? dec_ctx->height : scale_h_;
-
-                AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
-                bridge_ = new cvbridge(
-                        dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-                        scale_w_, scale_h_, pix_fmt, scale_f_);
-
-                if (!flag){
-                    logIt("FormatIn openCodec Failed!");
+                if (!gb_ && id < 0){
+                    id++;
+                    continue;
                 }
+
+                run_worker(in.get(), data, id);
+                usleep(wTime);
+
+                id++;
             }
-
-            auto frame(std::make_shared<FrameData>());
-            auto ret = in->decode(frame, pkt.data);
-            if(ret == 1){
-                // hand out the decoded data
-                cache_pic(frame, pkt.id);
-            }
-        }
-        cache_rec_pkt(pkt);
-        for(auto &i : map_rec_){
-            if (!i.second.rec){
-                i.second.rec = i.second.fn_init(in);
-                if (i.second.rec){
-                    std::lock_guard<std::mutex> locker(mtx_rec_pkt_);
-                    for(auto &k : list_rec_pkt_){
-                        avpacket p = {k.data, k.id};
-                        i.second.rec->CachePacket(p);
-                    }
-                    logIt("START REC %d FRAMES", list_rec_pkt_.size());
-                }
-            }else if (i.second.rec){
-                i.second.rec->CachePacket(pkt);
-            }
+
+            deinit_worker();
         }
     }

-    int Wrapper::cache_rec_pkt(const avpacket &pkt){
+    void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur, const bool audio){
+        bool a = audio;
+        if (gb_) a = false;

-        std::lock_guard<std::mutex> locker(mtx_rec_pkt_);
-        // wait for an I frame
-        if (list_rec_pkt_.empty()) {
-            AVPacket &avpkt = pkt.data->getAVPacket();
-            if (!(avpkt.flags & AV_PKT_FLAG_KEY)){
-                return -1;
-            }
+        if (rec_->Loaded()){
+            rec_->NewRec(id, output, mindur, maxdur, a);
+        }else{
+            std::string rid(id), dir(output);
+            fn_rec_lazy_ =
+                [=]{rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, a);};
         }
-        maybe_dump_rec_pkt();
-        recpkt k = {pkt.data, pkt.id};
-        list_rec_pkt_.push_back(k);
-
-        return 0;
-    }
-    void Wrapper::maybe_dump_rec_pkt(){
-        // if the cache exceeds min/2, drop a GOP
-        while (list_rec_pkt_.size() > minduration) {
-            list_rec_pkt_.pop_front();
-            while(!list_rec_pkt_.empty()){
-                auto &cache = list_rec_pkt_.front();
-                AVPacket &avpkt = cache.data->getAVPacket();
-                if (!(avpkt.flags & AV_PKT_FLAG_KEY)){
-                    list_rec_pkt_.pop_front();
-                }else{
-                    break;
-                }
-            }
-        }
-    }
-
-    //////////////recorder
-    std::shared_ptr<Recorder> Wrapper::init_recorder(FormatIn *in, std::string id, std::string dir, const int mind, const int maxd){
-        if(!in){
-            logIt("Init wrapper first");
-            return nullptr;
-        }
-
-        auto rec = std::make_shared<Recorder>(in, id.c_str());
-
-        rec->SetCallback([&](std::string &id, int &index, std::string &path){
-            cache_rec_info(id, index, path);
-        });
-
-        int trycnt = 0;
-        while(trycnt < 100){
-            const int ret = rec->Run(dir.c_str(), mind, maxd);
-            if(ret == 0) break;
-            usleep(200000);
-        }
-        if (trycnt < 100){
-            return rec;
-        }
-        return nullptr;
-    }
-
-    void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur){
-        std::string rid(id);
-        std::string dir(output);
-
-        auto fn = [=](FormatIn *in){
-            return init_recorder(in, rid, dir, mindur, maxdur);
-        };
-        std::shared_ptr<Recorder> rec(nullptr);
-
-        FnRec r = FnRec{fn, rec};
-        map_rec_[rid] = r;
-
-        minduration = mindur * 25;
-        maxduration = maxdur * 25;
     }

     int Wrapper::FireRecorder(const char* sid,const int64_t &id){
-        auto iter = map_rec_.find(sid);
-        if (iter != map_rec_.end()){
-            if(iter->second.rec){
-                iter->second.rec->FireRecorder(id);
-            }
+        if (rec_->Loaded()){
+            rec_->FireRecSignal(sid, id);
         }
     }

-    void Wrapper::cache_rec_info(std::string &id, int &index, std::string &path){
-
-        std::lock_guard<std::mutex> l(mutex_rec_);
-        while(list_rec_.size() > 100){
-            for(int i = 0; i < 25; i++){
-                list_rec_.pop_front();
-            }
+    void Wrapper::GetInfoRecorder(std::string &recID, int &index, std::string &path){
+        if (rec_){
+            rec_->GetRecInfo(recID, index, path);
         }
-        struct record_file_info info;
-        info.file_frame_index = index;
-        info.file_path = path;
-        info.rec_id = id;
-        list_rec_.emplace_back(info);
-        list_rec_map_[path] = id;
-        logIt("LIST REC FILES COUNT : %d", list_rec_.size());
-
-    }
-
-    void Wrapper::GetInfoRecorder(int &index, std::string &path){
-        std::lock_guard<std::mutex> l(mutex_rec_);
-        if(list_rec_.empty()){
-            index = -1;
-            path = "";
-            return;
-        }
-        auto info = list_rec_.front();
-        index = info.file_frame_index;
-        path = info.file_path;
-        list_rec_.pop_front();
-
-        if (map_rec_.find(info.rec_id) != map_rec_.end())
-            map_rec_.erase(info.rec_id);
-        // logIt("go get info index: %d, file: %s\n", index, path.c_str());
-    }
-
-    std::string Wrapper::GetRecorderID(const std::string &path){
-        std::string ret("");
-        auto iter = list_rec_map_.find(path);
-        if (iter != list_rec_map_.end()){
-            ret = iter->second;
-            list_rec_map_.erase(iter);
-        }
-
-
-        return ret;
     }

     ////////decoder
     void Wrapper::BuildDecoder(){
-        use_decoder_ = true;
+        run_dec_ = true;
     }

-    void Wrapper::cache_pic(std::shared_ptr<ffwrapper::FrameData> &frame, int64_t &id){
-
-        pic_bgr24 pic;
-        if(bridge_){
-            AVFrame *frm = frame->getAVFrame();
-            pic.w = scale_w_;
-            pic.h = scale_h_;
-
-            unsigned char *data = (unsigned char*)malloc(pic.w * pic.h * 3);
-            bridge_->copyPicture(data, frm);
-            pic.data = data;
-            pic.id = id;
+    void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id){
+        if (decoder_){
+            decoder_->GetFrame(data, w, h, format, length, id);
         }
-
-        {
-            std::lock_guard<std::mutex> l(mutex_pic_);
-            while(list_pic_.size() > 10){
-                for(int i = 0; i < 5; i++){
-                    auto t = list_pic_.front();
-                    free(t.data);
-                    list_pic_.pop_front();
-                }
-            }
-            list_pic_.emplace_back(pic);
-        }
-    }
-
-    void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int64_t *id){
-        std::lock_guard<std::mutex> l(mutex_pic_);
-        if(list_pic_.empty()){
-            *data = NULL;
-            *w = 0;
-            *h = 0;
-            return;
-        }
-        auto p = list_pic_.front();
-        *data = p.data; *w = p.w; *h = p.h;
-        *id = p.id;
-        list_pic_.pop_front();
-    }
-
+
     void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){
-        std::lock_guard<std::mutex> l(mutex_avpkt_);
-        if(list_avpkt_.empty()){
-            return;
+        if (stream_){
+            stream_->GetPacket(pktData, size, key);
         }
-        auto pkt = list_avpkt_.front();
-        *key = pkt.flags & AV_PKT_FLAG_KEY;
-        *size = pkt.size;
-        *pktData = (unsigned char *)malloc(*size);
-        memcpy(*pktData, pkt.data, pkt.size);
-
-        list_avpkt_.pop_front();
-    }
-    void Wrapper::cacheAVPacket(const AVPacket &pkt){
-        std::lock_guard<std::mutex> l(mutex_pic_);
-        while(list_avpkt_.size() > 10){
-//            printf("cacheAVPacket drop packets!!!!!!!!!!\n");
-            for(int i = 0; i < 5; i++){
-                list_avpkt_.pop_front();
-            }
-        }
-        list_avpkt_.emplace_back(pkt);
     }

-    ////// test
-    uint8_t *Wrapper::decodeJPEG(const char *file, int *w, int *h){
+} // end class wrapper
+///////////////////////////////////////////////////////////
+///single decode or encoder
+////// decoder
+
+#include "ffmpeg/data/FrameData.hpp"
+
+// return val: -1 open error; -2, find stream error; -3, converter create
+namespace cffmpeg_wrap{ // start test functions
+    uint8_t* Decode(const char *file, const int gb, int *w, int *h){
         VideoProp prop;
         prop.url_ = file;
         prop.gpu_acc_ = false;

         std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
-        int flag = in->open(file, NULL);
-
+        int flag = -1;
+        if (gb){
+            flag = in->openGb28181(file, NULL);
+        }else{
+            flag = in->open(file, NULL);
+        }
+
         std::unique_ptr<cvbridge> bridge_(nullptr);

         if(flag == 0){
             if(!in->findStreamInfo(NULL)){
                 logIt("yolo can't find video stream\n");
+                *w = *h = -2;
                 return NULL;
             }
-            auto flag = in->openCodec(AVMEDIA_TYPE_VIDEO, NULL);
+            auto flag = in->openCodec(NULL);
             if(flag){
                 auto dec_ctx = in->getCodecContext();
@@ -448,32 +302,38 @@
             }else{
                 logIt("FormatIn openCodec Failed!");
+                *w = *h = -3;
                 return NULL;
             }
         }else{
-            logIt("open %s error", input_url_.c_str());
+            logIt("open %s error", file);
+            *w = *h = -1;
             return NULL;
         }

-        auto data(std::make_shared<CodedData>());
-        if(!in->readPacket(data)){
-            logIt("read packet error");
-            return NULL;
+        uint8_t *pic = NULL;
+        *w = *h = 0;
+
+        int tryTime = 0;
+        while (tryTime++ < 100){
+
+            auto data(std::make_shared<CodedData>());
+            if (in->readPacket(&data->getAVPacket()) == 0){
+
+                auto frame(std::make_shared<FrameData>());
+                AVFrame *frm = frame->getAVFrame();
+                if(in->decode(frm, &data->getAVPacket()) == 0){
+                    *w = frm->width;
+                    *h = frm->height;
+                    pic = (unsigned char*)malloc(frm->width * frm->height * 3);
+                    bridge_->copyPicture(pic, frm);
+                    break;
+                }
+            }
         }
-        auto frame(std::make_shared<FrameData>());
-        auto ret = in->decode(frame, data);
-        if(ret == 1){
-            AVFrame *frm = frame->getAVFrame();
-            *w = frm->width;
-            *h = frm->height;
-            unsigned char *data = (unsigned char*)malloc(frm->width * frm->height * 3);
-            bridge_->copyPicture(data, frm);
-            return data;
-        }
-        return NULL;
+
+        return pic;
     }

     /////// for encoder
     typedef struct _PicEncoder{
         FormatOut *enc;
@@ -536,32 +396,136 @@
         }

         AVFrame *frame = e->bridge->getAVFrame(in, w, h);
-        auto data(std::make_shared<CodedData>());
+        AVPacket *pkt = av_packet_alloc();

-        const int flag = e->enc->encode(data, frame);
-        if(flag > 0){
-            auto pkt = data->getAVPacket();
+        auto flag = e->enc->encode(pkt, frame);
+        if(flag == 0){
             int extradata_size = ctx->extradata_size;
             uint8_t *extra = ctx->extradata;
-            *key = pkt.flags & AV_PKT_FLAG_KEY;
+            *key = pkt->flags & AV_PKT_FLAG_KEY;
             if(!(*key)){
                 extradata_size = 0;
             }
-            *size = pkt.size + extradata_size;
+            *size = pkt->size + extradata_size;
             *out = (unsigned char *)malloc(*size);
             memcpy(*out, extra, extradata_size);
-            memcpy(*out + extradata_size, pkt.data, pkt.size);
+            memcpy(*out + extradata_size, pkt->data, pkt->size);
         }else{
             logIt("encode error or need more packet\n");
         }

+        av_packet_free(&pkt);
         av_frame_free(&frame);

         return flag;
     }

+///////////////////////////////////////////////////////////
+    typedef struct _conv
+    {
+        int srcW;
+        int srcH;
+        int srcF;
+        int dstW;
+        int dstH;
+        cvbridge *b;
+    }Conv;
+
+    void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
+                          const int dstW, const int dstH, const int flag){
+        AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
+        auto bridge = new cvbridge(
+                srcW, srcH, srcFormat,
+                dstW, dstH, pix_fmt, flag);
+        if (!bridge) return NULL;
+
+        Conv *c = (Conv*)malloc(sizeof(Conv));
+        c->b = bridge;
+        c->dstW = dstW;
+        c->dstH = dstH;
+        c->srcW = srcW;
+        c->srcH = srcH;
+        c->srcF = srcFormat;
+
+        return c;
+    }
+
+    uint8_t *Convert(void *h, uint8_t *src){
+        Conv *c = (Conv*)h;
+
+        auto b = c->b;
+
+        AVFrame *tmp_frm = av_frame_alloc();
+        tmp_frm->format = (AVPixelFormat)c->srcF;
+        tmp_frm->width = c->srcW;
+        tmp_frm->height = c->srcH;
+
+        //create a AVPicture frame from the opencv Mat input image
+        int ret = avpicture_fill((AVPicture *)tmp_frm,
+                                 (uint8_t *)src,
+                                 (AVPixelFormat)tmp_frm->format,
+                                 tmp_frm->width,
+                                 tmp_frm->height);
+
+        unsigned char *picData = NULL;
+        if (ret > 0){
+            picData = (unsigned char*)malloc(c->dstW * c->dstH * 3);
+            b->copyPicture(picData, tmp_frm);
+        }
+
+        av_frame_free(&tmp_frm);
+
+        return picData;
+    }
+
+    void DestoryConvertor(void *h){
+        Conv *c = (Conv*)h;
+        delete c->b;
+        free(c);
+    }
+
+
+    uint8_t* ConvertYUV2BGR(uint8_t *src, const int w, const int h, const int dst_w, const int dst_h, int *length){
+        return NULL;
+
+        // int oldw = w, oldh = h, neww = dst_w, newh = dst_h;
+        // // setting cache and shared modes
+        // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
+        // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
+
+        // // allocating and transferring image and binding to texture object
+        // cudaChannelFormatDesc chandesc_img = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
+        // cudaArray* d_img_arr;
+        // cudaMallocArray(&d_img_arr, &chandesc_img, oldw, oldh, cudaArrayTextureGather);
+        // cudaMemcpyToArray(d_img_arr, 0, 0, image, oldh * oldw, cudaMemcpyHostToDevice);
+        // struct cudaResourceDesc resdesc_img;
+        // memset(&resdesc_img, 0, sizeof(resdesc_img));
+        // resdesc_img.resType = cudaResourceTypeArray;
+        // resdesc_img.res.array.array = d_img_arr;
+        // struct cudaTextureDesc texdesc_img;
+        // memset(&texdesc_img, 0, sizeof(texdesc_img));
+        // texdesc_img.addressMode[0] = cudaAddressModeClamp;
+        // texdesc_img.addressMode[1] = cudaAddressModeClamp;
+        // texdesc_img.readMode = cudaReadModeNormalizedFloat;
+        // texdesc_img.filterMode = cudaFilterModePoint;
+        // texdesc_img.normalizedCoords = 0;
+        // cudaTextureObject_t d_img_tex = 0;
+        // cudaCreateTextureObject(&d_img_tex, &resdesc_img, &texdesc_img, nullptr);
+
+        // uint8_t* d_out = nullptr;
+        // cudaMalloc(&d_out, total);
+
+        // for (int i = 0; i < warmups; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+        // auto start = high_resolution_clock::now();
+        // for (int i = 0; i < runs; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+        // auto end = high_resolution_clock::now();
+        // auto sum = (end - start) / runs;
+
+        // auto h_out = new uint8_t[neww * newh];
+        // cudaMemcpy(h_out, d_out, total, cudaMemcpyDeviceToHost);
+    }
}
--
Gitblit v1.8.0
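
Usage note (not part of the patch): the hunks above remove the old in-wrapper ScalePicture/cache_pic path and expose scaling through the standalone CreateConvertor / Convert / DestoryConvertor functions. The sketch below shows how a caller might drive that API. It is illustrative only: the prototypes are re-declared here to mirror the definitions added in this commit (in a real build they would come from the wrapper's public header), and the frame sizes and BGR24 source format are example values, not requirements of the library.

    // Minimal usage sketch, assuming the functions are exported exactly as
    // defined in the patch and FFmpeg development headers are available.
    #include <cstdint>
    #include <cstdlib>
    extern "C" {
    #include <libavutil/pixfmt.h>   // AV_PIX_FMT_BGR24
    #include <libswscale/swscale.h> // SWS_POINT
    }

    namespace cffmpeg_wrap {
        // Re-declared for the example; normally provided by the project header.
        void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
                              const int dstW, const int dstH, const int flag);
        uint8_t *Convert(void *h, uint8_t *src);
        void DestoryConvertor(void *h);
    }

    // Scale one decoded 1920x1080 BGR24 frame down to 960x540 (example sizes).
    int scale_frame_example(uint8_t *src_bgr24) {
        void *conv = cffmpeg_wrap::CreateConvertor(1920, 1080, AV_PIX_FMT_BGR24,
                                                   960, 540, SWS_POINT);
        if (!conv) return -1;

        // Convert() returns a malloc'ed dstW * dstH * 3 BGR24 buffer, or NULL on failure.
        uint8_t *scaled = cffmpeg_wrap::Convert(conv, src_bgr24);
        if (scaled) {
            // ... hand the scaled picture to the caller ...
            free(scaled);
        }

        // The converter is reusable across frames of the same geometry;
        // destroy it only when the source or destination size changes.
        cffmpeg_wrap::DestoryConvertor(conv);
        return 0;
    }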