#include "wrapper.hpp"
|
|
#include <thread>
|
#include <unistd.h>
|
|
extern "C"{
|
#include <libavformat/avformat.h>
|
#include <libavutil/opt.h>
|
#include <libswscale/swscale.h>
|
}
|
|
|
#include "ffmpeg/configure/conf.hpp"
|
#include "ffmpeg/format/FormatIn.hpp"
|
#include "ffmpeg/format/FormatOut.hpp"
|
#include "ffmpeg/data/CodedData.hpp"
|
#include "ffmpeg/property/VideoProp.hpp"
|
#include "ffmpeg/log/log.hpp"
|
#include "ffmpeg/bridge/cvbridge.hpp"
|
|
#include "buz/recorder.hpp"
|
|
#include "worker/stream.hpp"
|
#include "worker/decoder.hpp"
|
#include "worker/rec.hpp"
|
|
#include "CUDALERP.h"
|
|
using namespace logif;
|
using namespace ffwrapper;
|
|
// Delete the pointed-to object (if non-null) and reset the pointer to NULL.
// The do { } while(0) wrapper makes the macro behave as a single statement,
// so it expands safely inside un-braced if/else bodies.
#define DELETE_POINTER(p) \
do \
{ \
    if(NULL != p) \
        delete p; \
    p = NULL; \
}while(0)
|
|
namespace cffmpeg_wrap{
|
using namespace buz;
|
|
// Default constructor: all workers unset, logging-to-file disabled.
// makeTheWorld() performs the one-time global ffmpeg setup.
Wrapper::Wrapper()
:input_url_("")
,audio_(false)      // audio forwarding off by default
,gb_(0)             // not a GB28181 source unless set later
,cpu_(0)            // 0 => init_reader() requests GPU acceleration
,run_dec_(false)    // decoding disabled until BuildDecoder()
,thread_(nullptr)   // stream thread not started until RunStream()
,stop_stream_(false)
,stream_(nullptr)
,decoder_(nullptr)
,rec_(new rec)      // recorder worker lives for the wrapper's lifetime
,logit_(false)      // this overload creates no file logger
{
    makeTheWorld();
}
|
|
// Logging constructor: identical to the default constructor except that a
// file logger is created at `logfile` and logit_ marks it for teardown in
// the destructor.
Wrapper::Wrapper(const char *logfile)
:input_url_("")
,audio_(false)
,gb_(0)
,cpu_(0)
,run_dec_(false)
,thread_(nullptr)
,stop_stream_(false)
,stream_(nullptr)
,decoder_(nullptr)
,rec_(new rec)
,logit_(true)       // remember to DestroyLogger() in ~Wrapper()
{
    makeTheWorld();
    logif::CreateLogger(logfile, true);
}
|
|
|
// Destructor: stop the stream thread first (it owns stream_/decoder_ via
// deinit_worker), then free the recorder, then tear down the logger.
Wrapper::~Wrapper()
{
    try
    {
        if(thread_){
            // Signal the reader loop to exit and wait for it; the thread
            // runs deinit_worker() on its way out, so joining before
            // deleting rec_ avoids racing the workers.
            stop_stream_.store(true);
            thread_->join();
        }
        DELETE_POINTER(rec_);
    }
    catch(const std::exception& e)
    {
        logIt("WRAPPER EXCEPTION: ", e.what());
    }
    // Only destroy the logger if this instance created one (logfile ctor).
    if (logit_)
        logif::DestroyLogger();
}
|
|
std::unique_ptr<ffwrapper::FormatIn> Wrapper::init_reader(const char* input){
|
|
VideoProp prop;
|
prop.url_ = input;
|
prop.rtsp_tcp_ = true;
|
prop.gpu_acc_ = !cpu_;
|
|
std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
|
AVDictionary *avdic = prop.optsFormat();
|
int flag = -1;
|
if (gb_){
|
flag = in->openGb28181(input, NULL);
|
}else{
|
flag = in->open(input, &avdic);
|
}
|
if(avdic){
|
av_dict_free(&avdic);
|
}
|
if(flag == 0){
|
if(!in->findStreamInfo(NULL)){
|
logIt("can't find video stream\n");
|
return nullptr;
|
}
|
|
return in;
|
}
|
|
return nullptr;
|
}
|
|
int Wrapper::RunStream(const char* input){
|
if(thread_){
|
logIt("wrapper run stream already run");
|
return 0;
|
}
|
|
input_url_ = input;
|
|
thread_.reset(new std::thread([&]{
|
run_stream_thread();
|
}));
|
|
return 0;
|
}
|
|
// Record the requested audio on/off state. Forwarding the switch to the
// live stream worker is currently disabled (see commented code).
void Wrapper::AudioSwitch(const bool a){
    audio_ = a;
    // if (stream_){
    //     stream_->AudioSwitch(a);
    // }
}
|
|
// Create the per-connection workers (stream buffer, decoder, recorder)
// for an opened input, then replay any recorder request queued before the
// input was ready.
// FIX: the original early-return only fired when ALL three workers were
// initialised; in a partially-initialised state the raw stream_/decoder_
// pointers were overwritten, leaking the previous objects. Each creation
// is now guarded individually.
void Wrapper::init_worker(ffwrapper::FormatIn *in){
    // Fully initialised already — nothing to do.
    if (rec_->Loaded() && stream_ && decoder_) return;

    if (!stream_){
        // Buffer roughly three seconds of packets.
        stream_ = new stream(in, 3 * in->getFPS());
        // stream_->AudioSwitch(audio_);
    }

    if (!decoder_){
        decoder_ = new decoder(in);
    }

    if (!rec_->Loaded()){
        rec_->Load(in);
    }

    if(fn_rec_lazy_) {
        // A BuildRecorder() call arrived before the input existed; run it
        // now that the recorder is loaded.
        fn_rec_lazy_();
        fn_rec_lazy_ = nullptr;
    }
}
|
|
// Fan one coded packet out to every active consumer, tagged with the
// running frame id. `in` is currently unused here (kept for interface
// symmetry with the reader loop).
void Wrapper::run_worker(ffwrapper::FormatIn *in, std::shared_ptr<ffwrapper::CodedData> data, int64_t &id){
    if (gb_){
        // Clear timestamps on GB28181 packets so downstream consumers
        // regenerate timing. NOTE(review): presumably GB28181 timestamps
        // are unreliable here — confirm against the GB28181 input path.
        AVPacket &pkt = data->getAVPacket();
        pkt.pts = pkt.dts = AV_NOPTS_VALUE;
    }
    if (stream_) stream_->SetPacket(data, id);
    // Decoder only consumes when BuildDecoder() enabled it.
    if (decoder_ && run_dec_) decoder_->SetFrame(data, id);
    if (rec_->Loaded()) rec_->SetPacket(data, id);
}
|
|
// Tear down the per-connection workers. rec_ itself stays alive (it is
// owned by the wrapper) and is only unloaded so it can be reused on
// reconnect. `delete` on a null pointer is a no-op, so no guard is needed.
void Wrapper::deinit_worker(){
    delete stream_;
    stream_ = nullptr;
    delete decoder_;
    decoder_ = nullptr;
    rec_->Unload();
}
|
|
// Reader loop, run on thread_ until stop_stream_ is set.
// Repeatedly (re)opens the input, pulls packets, and fans them out to the
// workers; on a read error it tears the workers down and reconnects.
void Wrapper::run_stream_thread(){

    while(!stop_stream_.load()){
        auto in = init_reader(input_url_.c_str());

        if (!in) {
            logIt("ERROR: init_reader! url: %s\n", input_url_.c_str());
            // Back off before retrying the connection.
            sleep(2);
            continue;
        }

        // Per-packet pacing delay in microseconds: half of one frame
        // interval, i.e. packets are delivered at up to twice real time.
        int wTime = 1000000.0 / in->getFPS();
        wTime >>= 1;
        logIt("WAIT TIME PER FRAME: %d", wTime);

        init_worker(in.get());

        // GB28181 ids start at 0; other inputs start at -1 so that the
        // very first packet is skipped (see below).
        int64_t id = gb_ ? 0 : -1;

        while(!stop_stream_.load()){
            auto data(std::make_shared<CodedData>());
            if (in->readPacket(&data->getAVPacket()) != 0){
                logIt("read packet error, id: %lld", id);
                break;  // reconnect via the outer loop
            }

            // Drop packets that are neither video nor audio.
            if (in->notVideoAudio(&data->getAVPacket())){
                continue;
            }

            // Non-GB28181: skip the first packet. NOTE(review): presumably
            // this drops an initial partial/parameter packet — confirm
            // against FormatIn::readPacket.
            if (!gb_ && id < 0){
                id++;
                continue;
            }

            run_worker(in.get(), data, id);
            usleep(wTime);

            id++;
        }

        deinit_worker();
    }
}
|
|
void Wrapper::BuildRecorder(const char* id, const char *output, const int mindur, const int maxdur, const bool audio){
|
bool a = audio;
|
if (gb_) a = false;
|
|
if (rec_->Loaded()){
|
rec_->NewRec(id, output, mindur, maxdur, a);
|
}else{
|
std::string rid(id), dir(output);
|
fn_rec_lazy_ =
|
[=]{rec_->NewRec(rid.c_str(), dir.c_str(), mindur, maxdur, a);};
|
}
|
}
|
|
int Wrapper::FireRecorder(const char* sid,const int64_t &id){
|
if (rec_->Loaded()){
|
rec_->FireRecSignal(sid, id);
|
}
|
}
|
|
void Wrapper::GetInfoRecorder(std::string &recID, int &index, std::string &path){
|
if (rec_){
|
rec_->GetRecInfo(recID, index, path);
|
}
|
}
|
////////decoder
|
// Enable frame decoding: once set, run_worker() also feeds packets to the
// decoder so GetPicDecoder() can hand out raw frames.
void Wrapper::BuildDecoder(){
    run_dec_ = true;
}
|
|
void Wrapper::GetPicDecoder(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id){
|
if (decoder_){
|
decoder_->GetFrame(data, w, h, format, length, id);
|
}
|
}
|
|
void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){
|
if (stream_){
|
stream_->GetPacket(pktData, size, key);
|
}
|
}
|
|
} // end class wrapper
|
///////////////////////////////////////////////////////////
|
///single decode or encoder
|
////// decoder
|
|
#include "ffmpeg/data/FrameData.hpp"
|
|
// return val: -1 open error; -2, find stream error; -3, converter create
|
namespace cffmpeg_wrap{ // start test functions
|
// Decode the first video frame of `file` into a freshly malloc()ed BGR24
// buffer ((*w) * (*h) * 3 bytes) owned by the caller.
// On success *w/*h hold the frame size. On failure returns NULL with
// *w == *h set to an error code:
//   -1 open error; -2 find-stream error; -3 codec/converter create error;
//   0/0 with NULL means no frame was produced within 100 packets.
uint8_t* Decode(const char *file, const int gb, int *w, int *h){
    VideoProp prop;
    prop.url_ = file;
    prop.gpu_acc_ = false;  // this helper always decodes on the CPU

    std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl()));
    int flag = -1;
    if (gb){
        flag = in->openGb28181(file, NULL);
    }else{
        flag = in->open(file, NULL);
    }

    // Converter from the decoder's native pixel format to BGR24.
    std::unique_ptr<cvbridge> bridge_(nullptr);

    if(flag == 0){
        if(!in->findStreamInfo(NULL)){
            logIt("yolo can't find video stream\n");
            *w = *h = -2;
            return NULL;
        }
        auto flag = in->openCodec(NULL);  // NOTE: shadows the outer `flag`
        if(flag){
            auto dec_ctx = in->getCodecContext();

            AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
            bridge_.reset(new cvbridge(
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->width, dec_ctx->height, pix_fmt, SWS_BICUBIC));

        }else{
            logIt("FormatIn openCodec Failed!");
            *w = *h = -3;
            return NULL;
        }
    }else{
        logIt("open %s error", file);
        *w = *h = -1;
        return NULL;
    }

    uint8_t *pic = NULL;
    *w = *h = 0;

    // Read up to 100 packets: decoders commonly need several packets
    // before emitting the first frame.
    int tryTime = 0;
    while (tryTime++ < 100){

        auto data(std::make_shared<CodedData>());
        if (in->readPacket(&data->getAVPacket()) == 0){

            auto frame(std::make_shared<FrameData>());
            AVFrame *frm = frame->getAVFrame();
            if(in->decode(frm, &data->getAVPacket()) == 0){
                *w = frm->width;
                *h = frm->height;
                pic = (unsigned char*)malloc(frm->width * frm->height * 3);
                bridge_->copyPicture(pic, frm);
                break;
            }
        }
    }

    return pic;
}
|
/////// for encoder
|
// Handle returned by CreateEncoder(): the FFmpeg encoder plus the
// parameters it was created with and a lazily-created input converter.
typedef struct _PicEncoder{
    FormatOut *enc;     // encoder context (owned; deleted in DestroyEncoder)
    int w;              // output width
    int h;              // output height
    int fps;            // output frame rate
    int br;             // output bit rate
    int gi;             // GPU index; < 0 selects CPU encoding
    int flag;           // swscale flag used when converting the input
    cvbridge *bridge;   // input converter, created on first Encode() (owned)
} PicEncoder;
|
|
void *CreateEncoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi){
|
|
PicEncoder *e = (PicEncoder*)malloc(sizeof(PicEncoder));
|
e->enc = NULL;
|
e->w = w;
|
e->h = h;
|
e->fps = fps;
|
e->br = br;
|
e->gi = gi;
|
e->flag = scale_flag;
|
e->bridge = NULL;
|
|
VideoProp prop_;
|
prop_.width_ = w;
|
prop_.height_ = h;
|
prop_.fps_ = fps;
|
prop_.bit_rate_ = br;
|
gi < 0 ? prop_.gpu_acc_ = false : prop_.gpu_acc_ = true;
|
|
FormatOut *enc = new FormatOut(prop_, "./88.mp4");
|
e->enc = enc;
|
|
return e;
|
}
|
|
// Release an encoder handle created by CreateEncoder().
// Safe to call with NULL; `delete` on null members is a no-op.
void DestroyEncoder(void *h){
    PicEncoder *e = (PicEncoder*)h;
    if (!e) return;

    delete e->bridge;
    delete e->enc;
    free(e);
}
|
|
// Encode one packed BGR24 image (w x h) into a single compressed packet.
// On success (*out, *size) describe a malloc()ed buffer owned by the
// caller and *key is non-zero for keyframes; keyframes are prefixed with
// the codec extradata (e.g. SPS/PPS) so each is independently decodable.
// Returns the encoder status: 0 on success, non-zero if the encoder
// errored or needs more input before producing a packet.
int Encode(void *hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key){

    PicEncoder *e = (PicEncoder*)hdl;
    auto ctx = e->enc->getCodecContext();

    // Lazily build the scale/format converter on first use, now that the
    // actual input dimensions are known.
    if (e->bridge == NULL){
        AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
        e->bridge = new cvbridge(
                w, h, AV_PIX_FMT_BGR24,
                e->w, e->h, ctx->pix_fmt, e->flag);
    }

    AVFrame *frame = e->bridge->getAVFrame(in, w, h);
    AVPacket *pkt = av_packet_alloc();

    auto flag = e->enc->encode(pkt, frame);
    if(flag == 0){
        int extradata_size = ctx->extradata_size;
        uint8_t *extra = ctx->extradata;

        *key = pkt->flags & AV_PKT_FLAG_KEY;
        if(!(*key)){
            // Non-keyframes don't carry the parameter sets.
            extradata_size = 0;
        }
        *size = pkt->size + extradata_size;
        *out = (unsigned char *)malloc(*size);

        memcpy(*out, extra, extradata_size);
        memcpy(*out + extradata_size, pkt->data, pkt->size);

    }else{
        logIt("encode error or need more packet\n");
    }

    av_packet_free(&pkt);
    // NOTE(review): assumes getAVFrame() transfers ownership of the frame
    // to the caller — confirm cvbridge does not retain/free it itself,
    // otherwise this double-frees.
    av_frame_free(&frame);

    return flag;
}
|
|
///////////////////////////////////////////////////////////
|
// Handle returned by CreateConvertor(): cached source/destination geometry
// plus the bridge that performs the actual scaling/format conversion.
typedef struct _conv
{
    int srcW;       // source width
    int srcH;       // source height
    int srcF;       // source pixel format (AVPixelFormat value)
    int dstW;       // destination width
    int dstH;       // destination height
    cvbridge *b;    // converter (owned; freed in DestoryConvertor)
}Conv;
|
|
void *CreateConvertor(const int srcW, const int srcH, const int srcFormat,
|
const int dstW, const int dstH, const int flag){
|
AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24;
|
// just resize
|
if (flag == 0){
|
pix_fmt = (AVPixelFormat)srcFormat;
|
}
|
auto bridge = new cvbridge(
|
srcW, srcH, srcFormat,
|
dstW, dstH, pix_fmt, flag);
|
if (!bridge) return NULL;
|
|
Conv *c = (Conv*)malloc(sizeof(Conv));
|
c->b = bridge;
|
c->dstW = dstW;
|
c->dstH = dstH;
|
c->srcW = srcW;
|
c->srcH = srcH;
|
c->srcF = srcFormat;
|
|
return c;
|
}
|
|
// Convert/rescale one raw image through a CreateConvertor() handle.
// `src` must be a packed c->srcW x c->srcH image in format c->srcF.
// Returns a malloc()ed dstW*dstH*3 buffer owned by the caller, or NULL if
// the input could not be wrapped in a frame.
uint8_t *Convert(void *h, uint8_t *src){
    Conv *c = (Conv*)h;

    auto b = c->b;

    // Wrap the caller's buffer in an AVFrame without copying the pixels.
    AVFrame *tmp_frm = av_frame_alloc();
    tmp_frm->format = (AVPixelFormat)c->srcF;
    tmp_frm->width = c->srcW;
    tmp_frm->height = c->srcH;

    //create a AVPicture frame from the opencv Mat input image
    // (avpicture_fill is deprecated in newer FFmpeg; av_image_fill_arrays
    // is the modern replacement. ret is the required buffer size, or
    // negative on error.)
    int ret = avpicture_fill((AVPicture *)tmp_frm,
                        (uint8_t *)src,
                        (AVPixelFormat)tmp_frm->format,
                        tmp_frm->width,
                        tmp_frm->height);

    unsigned char *picData = NULL;
    if (ret > 0){
        // NOTE(review): output size assumes 3 bytes/pixel (BGR24) — holds
        // unless the convertor was created with flag == 0 and a source
        // format that is not 3 bytes/pixel; confirm callers.
        picData = (unsigned char*)malloc(c->dstW * c->dstH * 3);
        b->copyPicture(picData, tmp_frm);
    }

    // Frees only the AVFrame shell; `src` still belongs to the caller.
    av_frame_free(&tmp_frm);

    return picData;
}
|
|
void DestoryConvertor(void *h){
|
Conv *c = (Conv*)h;
|
delete c->b;
|
free(c);
|
}
|
|
|
// STUB: GPU-accelerated resize/convert is disabled; always returns NULL
// and leaves *length untouched. The commented block below is the intended
// CUDA implementation (CUDALERP texture-based resize) kept for reference;
// note it only handles a single 8-bit plane, not a full YUV->BGR convert.
uint8_t* ConvertYUV2BGR(uint8_t *src, const int w, const int h, const int dst_w, const int dst_h, int *length){
    return NULL;

    // int oldw = w, oldh = h, neww = dst_w, newh = dst_h;
    // // setting cache and shared modes
    // cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
    // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);

    // // allocating and transferring image and binding to texture object
    // cudaChannelFormatDesc chandesc_img = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
    // cudaArray* d_img_arr;
    // cudaMallocArray(&d_img_arr, &chandesc_img, oldw, oldh, cudaArrayTextureGather);
    // cudaMemcpyToArray(d_img_arr, 0, 0, image, oldh * oldw, cudaMemcpyHostToDevice);
    // struct cudaResourceDesc resdesc_img;
    // memset(&resdesc_img, 0, sizeof(resdesc_img));
    // resdesc_img.resType = cudaResourceTypeArray;
    // resdesc_img.res.array.array = d_img_arr;
    // struct cudaTextureDesc texdesc_img;
    // memset(&texdesc_img, 0, sizeof(texdesc_img));
    // texdesc_img.addressMode[0] = cudaAddressModeClamp;
    // texdesc_img.addressMode[1] = cudaAddressModeClamp;
    // texdesc_img.readMode = cudaReadModeNormalizedFloat;
    // texdesc_img.filterMode = cudaFilterModePoint;
    // texdesc_img.normalizedCoords = 0;
    // cudaTextureObject_t d_img_tex = 0;
    // cudaCreateTextureObject(&d_img_tex, &resdesc_img, &texdesc_img, nullptr);

    // uint8_t* d_out = nullptr;
    // cudaMalloc(&d_out, total);

    // for (int i = 0; i < warmups; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
    // auto start = high_resolution_clock::now();
    // for (int i = 0; i < runs; ++i) CUDALERP(d_img_tex, oldw, oldh, d_out, neww, newh);
    // auto end = high_resolution_clock::now();
    // auto sum = (end - start) / runs;

    // auto h_out = new uint8_t[neww * newh];
    // cudaMemcpy(h_out, d_out, total, cudaMemcpyDeviceToHost);
}
|
}
|