From f452c5e3ff1246ab5c23088f156abfbbfc13101d Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Mon, 14 Oct 2019 13:53:38 +0800
Subject: [PATCH] wrapper: add GB28181 handling, opt-in decoding, file logging, and a pixel-format converter API
---
csrc/wrapper.cpp | 243 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 198 insertions(+), 45 deletions(-)
diff --git a/csrc/wrapper.cpp b/csrc/wrapper.cpp
index 757f0bb..ece3ddf 100644
--- a/csrc/wrapper.cpp
+++ b/csrc/wrapper.cpp
@@ -24,6 +24,8 @@
#include "worker/decoder.hpp"
#include "worker/rec.hpp"
+#include "CUDALERP.h"
+
using namespace logif;
using namespace ffwrapper;
@@ -40,19 +42,35 @@
Wrapper::Wrapper()
:input_url_("")
- ,scale_w_(0)
- ,scale_h_(0)
- ,scale_f_(SWS_POINT)
,audio_(false)
,gb_(0)
,cpu_(0)
+ ,run_dec_(false)
,thread_(nullptr)
,stop_stream_(false)
,stream_(nullptr)
,decoder_(nullptr)
,rec_(new rec)
+ ,logit_(false)
{
makeTheWorld();
+ }
+
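+	// Overload that additionally creates a file logger; logit_ marks the logger for teardown in ~Wrapper().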
+ Wrapper::Wrapper(const char *logfile)
+ :input_url_("")
+ ,audio_(false)
+ ,gb_(0)
+ ,cpu_(0)
+ ,run_dec_(false)
+ ,thread_(nullptr)
+ ,stop_stream_(false)
+ ,stream_(nullptr)
+ ,decoder_(nullptr)
+ ,rec_(new rec)
+ ,logit_(true)
+ {
+ makeTheWorld();
+ logif::CreateLogger(logfile, true);
}
@@ -70,6 +88,8 @@
{
logIt("WRAPPER EXCEPTION: ", e.what());
}
+ if (logit_)
+ logif::DestroyLogger();
}
std::unique_ptr<ffwrapper::FormatIn> Wrapper::init_reader(const char* input){
@@ -119,28 +139,37 @@
void Wrapper::AudioSwitch(const bool a){
audio_ = a;
- if (stream_){
- stream_->AudioSwitch(a);
- }
+ // if (stream_){
+ // stream_->AudioSwitch(a);
+ // }
}
void Wrapper::init_worker(ffwrapper::FormatIn *in){
if (rec_->Loaded() && stream_ && decoder_) return;
- stream_ = new stream(in, 3 * 25);
- stream_->AudioSwitch(audio_);
+ stream_ = new stream(in, 3 * in->getFPS());
+ // stream_->AudioSwitch(audio_);
- decoder_ = new decoder(in, scale_w_, scale_h_, scale_f_);
+ decoder_ = new decoder(in);
rec_->Load(in);
- if(fn_rec_lazy_) fn_rec_lazy_(in);
+ if(fn_rec_lazy_) {
+ fn_rec_lazy_();
+ fn_rec_lazy_ = nullptr;
+ }
}
- void Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
-
+ int Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
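+		// Returns the decoder result when decoding is enabled via BuildDecoder()
+		// (run_dec_), otherwise 0; the caller treats -1 as a decode error and
+		// stops the read loop.
+		// For GB28181 input, pts/dts are reset to AV_NOPTS_VALUE before the
+		// packet is handed to the workers.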
+ if (gb_){
+ AVPacket &pkt = data->getAVPacket();
+ pkt.pts = pkt.dts = AV_NOPTS_VALUE;
+ }
+ int flag = 0;
if (stream_) stream_->SetPacket(data, id);
+ if (decoder_ && run_dec_) flag = decoder_->SetFrame(data, id);
if (rec_->Loaded()) rec_->SetPacket(data, id);
- if (decoder_) decoder_->SetFrame(data, id);
+
+ return flag;
}
void Wrapper::deinit_worker(){
@@ -156,26 +185,38 @@
if (!in) {
logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
- usleep(200000);
+ sleep(2);
continue;
}
-
+
int wTime = 1000000.0 / in->getFPS() ;
wTime >>= 1;
- logIt("INPUT FPS: %d", wTime);
+ logIt("WAIT TIME PER FRAME: %d", wTime);
init_worker(in.get());
- int64_t id = 0;
+ int64_t id = gb_ ? 0 : -1;
+
while(!stop_stream_.load()){
auto data(std::make_shared<CodedData>());
if (in->readPacket(&data->getAVPacket()) != 0){
logIt("read packet error, id: %lld", id);
break;
}
-
- run_worker(in.get(), data, id);
- usleep(wTime);
+
+ if (in->notVideoAudio(&data->getAVPacket())){
+ continue;
+ }
+
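+			// non-GB28181 input starts at id == -1: drop the first packet and feed the workers from id == 0 onwards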
+ if (!gb_ && id < 0){
+ id++;
+ continue;
+ }
+ // decode error
+ if (run_worker(in.get(), data, id) == -1){
+ break;
+ }
+ // usleep(wTime);
id++;
}
@@ -185,13 +226,15 @@
}
void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur, const bool audio){
-
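+		// GB28181 input: audio recording is always disabled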
+ bool a = audio;
+ if (gb_) a = false;
+
if (rec_->Loaded()){
- rec_->NewRec(id, output, mindur, maxdur, audio);
+ rec_->NewRec(id, output, mindur, maxdur, a);
}else{
std::string rid(id), dir(output);
fn_rec_lazy_ =
- [=](ffwrapper::FormatIn *in){rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, audio);};
+ [=]{rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, a);};
}
}
@@ -208,15 +251,15 @@
}
////////decoder
void Wrapper::BuildDecoder(){
- // use_decoder_ = true;
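+		// decoding is opt-in: decoder_->SetFrame() only runs once run_dec_ is set here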
+ run_dec_ = true;
}
- void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int64_t *id){
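+	// format/length are new out-parameters; they presumably carry the pixel format and byte size of the frame returned by decoder_->GetFrame().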
+ void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id){
if (decoder_){
- decoder_->GetFrame(data, w, h, id);
+ decoder_->GetFrame(data, w, h, format, length, id);
}
}
-
+
void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){
if (stream_){
stream_->GetPacket(pktData, size, key);
@@ -226,21 +269,31 @@
} // end class wrapper
///////////////////////////////////////////////////////////
///single decode or encoder
- ////// decoder
+////// decoder
+
+#include "ffmpeg/data/FrameData.hpp"
+
+// On failure Decode() returns NULL and sets *w/*h to the reason: -1 open error, -2 find-stream error, -3 open-codec/converter error
namespace cffmpeg_wrap{ // start test functions
- uint8_t* DecodeJPEG(const char *file, int *w, int *h){
+ uint8_t* Decode(const char *file, const int gb, int *w, int *h){
VideoProp prop;
prop.url_ = file;
prop.gpu_acc_ = false;
std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
- int flag = in->open(file, NULL);
-
+ int flag = -1;
+ if (gb){
+ flag = in->openGb28181(file, NULL);
+ }else{
+ flag = in->open(file, NULL);
+ }
+
std::unique_ptr<cvbridge> bridge_(nullptr);
if(flag == 0){
if(!in->findStreamInfo(NULL)){
logIt("yolo can't find video stream\n");
+ *w = *h = -2;
return NULL;
}
auto flag = in->openCodec(NULL);
@@ -254,27 +307,36 @@
}else{
logIt("FormatIn openCodec Failed!");
+ *w = *h = -3;
return NULL;
}
}else{
logIt("open %s error", file);
+ *w = *h = -1;
return NULL;
}
- uint8_t *data = NULL;
- AVPacket *pkt = av_packet_alloc();
- if(in->readPacket(pkt) == 0){
- AVFrame *frm = av_frame_alloc();
- if(in->decode(frm, pkt) == 0){
- *w = frm->width;
- *h = frm->height;
- data = (unsigned char*)malloc(frm->width * frm->height * 3);
- bridge_->copyPicture(data, frm);
+ uint8_t *pic = NULL;
+ *w = *h = 0;
+
+ int tryTime = 0;
+ while (tryTime++ < 100){
+
+ auto data(std::make_shared<CodedData>());
+ if (in->readPacket(&data->getAVPacket()) == 0){
+
+ auto frame(std::make_shared<FrameData>());
+ AVFrame *frm = frame->getAVFrame();
+ if(in->decode(frm, &data->getAVPacket()) == 0){
+ *w = frm->width;
+ *h = frm->height;
+ pic = bridge_->convert2Data(frm);
+ break;
+ }
}
- av_frame_free(&frm);
- av_packet_free(&pkt);
}
- return data;
+
+ return pic;
}
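+	// Usage sketch (hypothetical file path; ownership of the returned buffer is
+	// assumed to follow cvbridge::convert2Data, i.e. released with free()):
+	//
+	//   int w = 0, h = 0;
+	//   uint8_t *pic = Decode("/path/to/sample.mp4", 0 /*gb*/, &w, &h);
+	//   if (pic) {
+	//       // decoded pixel data; layout is defined by the cvbridge set up above
+	//       free(pic);
+	//   } else {
+	//       // w == h encodes the failure reason: -1/-2/-3 (see comment above Decode)
+	//   }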
/////// for encoder
typedef struct _PicEncoder{
@@ -330,14 +392,14 @@
PicEncoder *e = (PicEncoder*)hdl;
auto ctx = e->enc->getCodecContext();
+ AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
if (e->bridge == NULL){
- AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
e->bridge = new cvbridge(
w, h, AV_PIX_FMT_BGR24,
e->w, e->h, ctx->pix_fmt, e->flag);
}
- AVFrame *frame = e->bridge->getAVFrame(in, w, h);
+ AVFrame *frame = cvbridge::fillFrame(in, w, h, pix_fmt);
AVPacket *pkt = av_packet_alloc();
auto flag = e->enc->encode(pkt, frame);
@@ -365,5 +427,96 @@
return flag;
}
+///////////////////////////////////////////////////////////
+ typedef struct _conv
+ {
+ int srcW;
+ int srcH;
+ int srcF;
+ int dstW;
+ int dstH;
+ cvbridge *b;
+ }Conv;
+
+ void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
+ const int dstW, const int dstH, const int dstFormat, const int flag){
+
+ auto bridge = new cvbridge(
+ srcW, srcH, srcFormat,
+ dstW, dstH, dstFormat, flag);
+ if (!bridge) return NULL;
+
+ Conv *c = (Conv*)malloc(sizeof(Conv));
+ c->b = bridge;
+ c->dstW = dstW;
+ c->dstH = dstH;
+ c->srcW = srcW;
+ c->srcH = srcH;
+ c->srcF = srcFormat;
+
+ return c;
+ }
+
+ uint8_t *Convert(void *h, uint8_t *src){
+ Conv *c = (Conv*)h;
+
+ auto b = c->b;
+
+ AVFrame *tmp_frm = cvbridge::fillFrame(src, c->srcW, c->srcH, c->srcF);
+ if (!tmp_frm) return NULL;
+
+ unsigned char *picData = b->convert2Data(tmp_frm);
+
+ av_frame_free(&tmp_frm);
+
+ return picData;
+ }
+
+ void DestoryConvertor(void *h){
+ Conv *c = (Conv*)h;
+ delete c->b;
+ free(c);
+ }
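+	// Usage sketch (the format arguments are FFmpeg AVPixelFormat values and the
+	// flag is an SWS_* scaling flag, as used elsewhere in this file; releasing
+	// the returned buffer with free() is an assumption, matching Convert()'s
+	// use of cvbridge::convert2Data):
+	//
+	//   void *conv = CreateConvertor(1920, 1080, AV_PIX_FMT_NV12,
+	//                                1280, 720, AV_PIX_FMT_BGR24, SWS_BILINEAR);
+	//   uint8_t *bgr = Convert(conv, nv12_data);  // nv12_data: caller-provided source buffer
+	//   if (bgr) free(bgr);
+	//   DestoryConvertor(conv);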
+
+
+ uint8_t* ConvertYUV2BGR(uint8_t *src, const int w, const int h, const int dst_w, const int dst_h, int *length){
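+	// Stub: always returns NULL. The commented-out CUDA code below is kept for
+	// reference only; it references undefined variables (image, total, warmups,
+	// runs) and does not produce a BGR result.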
+ return NULL;
+
+ // int oldw = w, oldh = h, neww = dst_w, newh = dst_h;
+ // // setting cache and shared modes
+ // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
+ // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
+
+ // // allocating and transferring image and binding to texture object
+ // cudaChannelFormatDesc chandesc_img = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
+ // cudaArray* d_img_arr;
+ // cudaMallocArray(&d_img_arr, &chandesc_img, oldw, oldh, cudaArrayTextureGather);
+ // cudaMemcpyToArray(d_img_arr, 0, 0, image, oldh * oldw, cudaMemcpyHostToDevice);
+ // struct cudaResourceDesc resdesc_img;
+ // memset(&resdesc_img, 0, sizeof(resdesc_img));
+ // resdesc_img.resType = cudaResourceTypeArray;
+ // resdesc_img.res.array.array = d_img_arr;
+ // struct cudaTextureDesc texdesc_img;
+ // memset(&texdesc_img, 0, sizeof(texdesc_img));
+ // texdesc_img.addressMode[0] = cudaAddressModeClamp;
+ // texdesc_img.addressMode[1] = cudaAddressModeClamp;
+ // texdesc_img.readMode = cudaReadModeNormalizedFloat;
+ // texdesc_img.filterMode = cudaFilterModePoint;
+ // texdesc_img.normalizedCoords = 0;
+ // cudaTextureObject_t d_img_tex = 0;
+ // cudaCreateTextureObject(&d_img_tex, &resdesc_img, &texdesc_img, nullptr);
+
+ // uint8_t* d_out = nullptr;
+ // cudaMalloc(&d_out, total);
+
+ // for (int i = 0; i < warmups; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+ // auto start = high_resolution_clock::now();
+ // for (int i = 0; i < runs; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
+ // auto end = high_resolution_clock::now();
+ // auto sum = (end - start) / runs;
+
+ // auto h_out = new uint8_t[neww * newh];
+ // cudaMemcpy(h_out, d_out, total, cudaMemcpyDeviceToHost);
+ }
}
--
Gitblit v1.8.0