From 080acae08ec8cfe413c3e6e45bcf7f9222dfa02d Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Thu, 24 Oct 2019 16:50:28 +0800
Subject: [PATCH] update

---
 csrc/wrapper.cpp | 140 +++++++++++++++++++---------------------------
 1 file changed, 57 insertions(+), 83 deletions(-)

diff --git a/csrc/wrapper.cpp b/csrc/wrapper.cpp
index 2643ce0..86662a1 100644
--- a/csrc/wrapper.cpp
+++ b/csrc/wrapper.cpp
@@ -2,6 +2,7 @@
 #include <thread>
 #include <unistd.h>
+#include <sys/time.h>
 
 extern "C"{
 #include <libavformat/avformat.h>
@@ -23,8 +24,7 @@
 #include "worker/stream.hpp"
 #include "worker/decoder.hpp"
 #include "worker/rec.hpp"
-
-#include "CUDALERP.h"
+#include "common.hpp"
 
 using namespace logif;
 using namespace ffwrapper;
@@ -52,6 +52,7 @@
     ,decoder_(nullptr)
     ,rec_(new rec)
     ,logit_(false)
+    ,fps_(25)
     {
         makeTheWorld();
     }
@@ -159,14 +160,17 @@
         }
     }
 
-    void Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
+    int Wrapper::run_worker(ffwrapper::FormatIn *in, const CPacket &pkt){
         if (gb_){
-            AVPacket &pkt = data->getAVPacket();
-            pkt.pts = pkt.dts = AV_NOPTS_VALUE;
+            AVPacket &p = pkt.data->getAVPacket();
+            p.pts = p.dts = AV_NOPTS_VALUE;
         }
-        if (stream_) stream_->SetPacket(data, id);
-        if (decoder_ && run_dec_) decoder_->SetFrame(data, id);
-        if (rec_->Loaded()) rec_->SetPacket(data, id);
+        int flag = 0;
+        if (stream_) stream_->SetPacket(pkt);
+        if (decoder_ && run_dec_) flag = decoder_->SetFrame(pkt);
+        if (rec_->Loaded()) rec_->SetPacket(pkt);
+
+        return flag;
     }
 
     void Wrapper::deinit_worker(){
@@ -185,6 +189,8 @@
                 sleep(2);
                 continue;
             }
+
+            fps_ = in->getFPS();
 
             int wTime = 1000000.0 / in->getFPS() ;
             wTime >>= 1;
@@ -193,6 +199,10 @@
             init_worker(in.get());
 
             int64_t id = gb_ ? 0 : -1;
+            int64_t v_id = id;
+            int64_t a_id = id;
+
+            bool exist = access(input_url_.c_str(), 0) == 0 ? true : false;
 
             while(!stop_stream_.load()){
                 auto data(std::make_shared<CodedData>());
@@ -200,36 +210,54 @@
                     logIt("read packet error, id: %lld", id);
                     break;
                 }
-
+                // not an audio/video packet
                 if (in->notVideoAudio(&data->getAVPacket())){
                     continue;
                 }
-
+                // non-GB stream: skip the first frame (testing showed the first frame is problematic)
                 if (!gb_ && id < 0){
-                    id++;
+                    id++; v_id++; a_id++;
                     continue;
                 }
-
-                run_worker(in.get(), data, id);
-                usleep(wTime);
 
+                CPacket pkt{data, v_id, a_id, id};
+                // decode error
+                if (run_worker(in.get(), pkt) == -1){
+                    break;
+                }
+
+                if (in->isVideoPkt(&data->getAVPacket())){
+                    v_id++;
+                }else{
+                    a_id++;
+                }
                 id++;
+
+                // local file is read too fast, sleep a bit
+                if (exist){
+                    usleep(wTime);
+                }
+
            }
 
            deinit_worker();
        }
    }
 
-    void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur, const bool audio){
+    void Wrapper::SetRecMinCacheTime(const int mind){
+        rec_->SetRecMinCacheTime(mind);
+    }
+
+    void Wrapper::BuildRecorder(const char* id, const char *output, const int64_t &fid, const int mindur, const int maxdur, const bool audio){
         bool a = audio;
         if (gb_) a = false;
-
+
         if (rec_->Loaded()){
-            rec_->NewRec(id, output, mindur, maxdur, a);
+            rec_->NewRec(id, output, fid, mindur, maxdur, a);
         }else{
             std::string rid(id), dir(output);
             fn_rec_lazy_ =
-                [=]{rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, a);};
+                [=]{rec_->NewRec(rid.c_str(), dir.c_str(), fid, mindur, maxdur, a);};
         }
     }
@@ -325,8 +353,7 @@
             if(in->decode(frm, &data->getAVPacket()) == 0){
                 *w = frm->width;
                 *h = frm->height;
-                pic = (unsigned char*)malloc(frm->width * frm->height * 3);
-                bridge_->copyPicture(pic, frm);
+                pic = bridge_->convert2Data(frm);
                 break;
             }
         }
@@ -388,14 +415,14 @@
         PicEncoder *e = (PicEncoder*)hdl;
         auto ctx = e->enc->getCodecContext();
 
+        AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
         if (e->bridge == NULL){
-            AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
             e->bridge = new cvbridge(
                     w, h, AV_PIX_FMT_BGR24,
                     e->w, e->h, ctx->pix_fmt, e->flag);
         }
 
-        AVFrame *frame = e->bridge->getAVFrame(in, w, h);
+        AVFrame *frame = cvbridge::fillFrame(in, w, h, pix_fmt);
         AVPacket *pkt = av_packet_alloc();
 
         auto flag = e->enc->encode(pkt, frame);
@@ -435,11 +462,11 @@
     }Conv;
 
     void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
-                          const int dstW, const int dstH, const int flag){
-        AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
+                          const int dstW, const int dstH, const int dstFormat, const int flag){
+
         auto bridge = new cvbridge(
                 srcW, srcH, srcFormat,
-                dstW, dstH, pix_fmt, flag);
+                dstW, dstH, dstFormat, flag);
         if (!bridge) return NULL;
 
         Conv *c = (Conv*)malloc(sizeof(Conv));
@@ -458,26 +485,13 @@
 
         auto b = c->b;
 
-        AVFrame *tmp_frm = av_frame_alloc();
-        tmp_frm->format = (AVPixelFormat)c->srcF;
-        tmp_frm->width = c->srcW;
-        tmp_frm->height = c->srcH;
-
-        //create a AVPicture frame from the opencv Mat input image
-        int ret = avpicture_fill((AVPicture *)tmp_frm,
-                                 (uint8_t *)src,
-                                 (AVPixelFormat)tmp_frm->format,
-                                 tmp_frm->width,
-                                 tmp_frm->height);
+        AVFrame *tmp_frm = cvbridge::fillFrame(src, c->srcW, c->srcH, c->srcF);
+        if (!tmp_frm) return NULL;
 
-        unsigned char *picData = NULL;
-        if (ret > 0){
-            picData = (unsigned char*)malloc(c->dstW * c->dstH * 3);
-            b->copyPicture(picData, tmp_frm);
-        }
-
+        unsigned char *picData = b->convert2Data(tmp_frm);
+
         av_frame_free(&tmp_frm);
-
+
         return picData;
     }
@@ -487,45 +501,5 @@
         free(c);
     }
 
-
-    uint8_t* ConvertYUV2BGR(uint8_t *src, const int w, const int h, const int dst_w, const int dst_h, int *length){
-        return NULL;
-
-        // int oldw = w, oldh = h, neww = dst_w, newh = dst_h;
-        // // setting cache and shared modes
-        // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
-        // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
-
-        // // allocating and transferring image and binding to texture object
-        // cudaChannelFormatDesc chandesc_img = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
-        // cudaArray* d_img_arr;
-        // cudaMallocArray(&d_img_arr, &chandesc_img, oldw, oldh, cudaArrayTextureGather);
-        // cudaMemcpyToArray(d_img_arr, 0, 0, image, oldh * oldw, cudaMemcpyHostToDevice);
-        // struct cudaResourceDesc resdesc_img;
-        // memset(&resdesc_img, 0, sizeof(resdesc_img));
-        // resdesc_img.resType = cudaResourceTypeArray;
-        // resdesc_img.res.array.array = d_img_arr;
-        // struct cudaTextureDesc texdesc_img;
-        // memset(&texdesc_img, 0, sizeof(texdesc_img));
-        // texdesc_img.addressMode[0] = cudaAddressModeClamp;
-        // texdesc_img.addressMode[1] = cudaAddressModeClamp;
-        // texdesc_img.readMode = cudaReadModeNormalizedFloat;
-        // texdesc_img.filterMode = cudaFilterModePoint;
-        // texdesc_img.normalizedCoords = 0;
-        // cudaTextureObject_t d_img_tex = 0;
-        // cudaCreateTextureObject(&d_img_tex, &resdesc_img, &texdesc_img, nullptr);
-
-        // uint8_t* d_out = nullptr;
-        // cudaMalloc(&d_out, total);
-
-        // for (int i = 0; i < warmups; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
-        // auto start = high_resolution_clock::now();
-        // for (int i = 0; i < runs; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
-        // auto end = high_resolution_clock::now();
-        // auto sum = (end - start) / runs;
-
-        // auto h_out = new uint8_t[neww * newh];
-        // cudaMemcpy(h_out, d_out, total, cudaMemcpyDeviceToHost);
-    }
 }
-- 
Gitblit v1.8.0
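
Reviewer note: the hunks above replace the manual av_frame_alloc / avpicture_fill / copyPicture sequence with cvbridge::fillFrame and cvbridge::convert2Data, whose implementations are not part of this diff. As a reference for review, the sketch below shows the plain-FFmpeg path such helpers typically wrap: fill an AVFrame around a caller-owned packed buffer without copying, then sws_scale it into a freshly malloc'ed BGR24 buffer. The names sketch_fill_frame / sketch_convert and the hard-coded SWS_BICUBIC flag are illustrative assumptions, not the actual cvbridge API (the real code passes a flag through from CreateConvertor).

#include <cstdlib>
extern "C"{
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

// Wrap a caller-owned packed pixel buffer in an AVFrame without copying
// (roughly the role cvbridge::fillFrame plays in this patch).
static AVFrame* sketch_fill_frame(uint8_t *data, int w, int h, AVPixelFormat fmt){
    AVFrame *frm = av_frame_alloc();
    if (!frm) return NULL;
    frm->format = fmt;
    frm->width  = w;
    frm->height = h;
    if (av_image_fill_arrays(frm->data, frm->linesize, data, fmt, w, h, 1) < 0){
        av_frame_free(&frm);
        return NULL;
    }
    return frm;
}

// Convert a source frame into a newly malloc'ed packed BGR24 buffer
// (roughly the role cvbridge::convert2Data plays); the caller frees the result.
static unsigned char* sketch_convert(const AVFrame *src, int dstW, int dstH){
    SwsContext *sws = sws_getContext(src->width, src->height, (AVPixelFormat)src->format,
                                     dstW, dstH, AV_PIX_FMT_BGR24,
                                     SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws) return NULL;

    unsigned char *out = (unsigned char*)malloc(dstW * dstH * 3);
    if (!out){ sws_freeContext(sws); return NULL; }

    uint8_t *dst_data[4]     = { out, NULL, NULL, NULL };
    int      dst_linesize[4] = { dstW * 3, 0, 0, 0 };

    sws_scale(sws, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, dst_data, dst_linesize);
    sws_freeContext(sws);
    return out;
}

As with the picData buffer returned in the @@ -458 hunk, the buffer returned here is owned by the caller and must be released with free().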