//
|
// Created by ps on 19-1-11.
|
//
|
|
#include <zconf.h>
|
#include <opencv2/opencv.hpp>
|
#include "FFmpegDecoderJPG.h"
|
|
// One-time global FFmpeg setup: registers all muxers/demuxers/codecs and
// initializes the network layer. Must run before any decode/mux call; it is
// triggered automatically at static-initialization time (see initFFmpegRet).
void BASICGB28181::initFFmpeg() {
    av_register_all();
    avformat_network_init();
}
|
|
// Runs initFFmpeg() exactly once during static initialization via the comma
// operator; the bool itself is unused and exists only to trigger the call.
static bool initFFmpegRet = (BASICGB28181::initFFmpeg(), true);
|
|
//MyQueue<BASICGB28181::frameBuffInfo *> BASICGB28181::FFmpegDecoderJPG::m_rtpQueue;
|
//cv::Mat BASICGB28181::FFmpegDecoderJPG::m_image;
|
|
/**
 * Convert a decoded AVFrame to a BGR24 cv::Mat.
 *
 * @param frame decoded frame; width/height/format must be valid.
 * @return a freshly allocated CV_8UC3 (BGR) Mat with the converted pixels,
 *         or an empty Mat if no conversion context could be created.
 */
cv::Mat BASICGB28181::avframe_to_cvmat(AVFrame *frame) {
    AVFrame dst;
    memset(&dst, 0, sizeof(dst));

    int w = frame->width, h = frame->height;
    // Plain construction: std::move on a prvalue was a no-op.
    cv::Mat m(h, w, CV_8UC3);
    dst.data[0] = (uint8_t *) m.data;
    // Point dst's data/linesize at the Mat's own storage.
    avpicture_fill((AVPicture *) &dst, dst.data[0], AV_PIX_FMT_BGR24, w, h);

    // AVFrame::format is an int; sws_getContext expects AVPixelFormat.
    struct SwsContext *convert_ctx =
            sws_getContext(w, h, (AVPixelFormat) frame->format, w, h, AV_PIX_FMT_BGR24,
                           SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (convert_ctx == NULL) {
        // Unknown/unsupported source format: fail instead of crashing in sws_scale.
        ERR("sws_getContext failed, src format is " << frame->format);
        return cv::Mat();
    }
    sws_scale(convert_ctx, frame->data, frame->linesize, 0, h,
              dst.data, dst.linesize);
    sws_freeContext(convert_ctx);

    DBG("m.size is " << m.size());
    return m;
}
|
|
|
// Default state: 32 KiB AVIO buffer, decoder not running, no pending split
// package, no data consumed yet, empty RTP queue, frame counters reset.
BASICGB28181::FFmpegDecoderJPG::FFmpegDecoderJPG() : m_buf_size(32768), m_running(false), m_PackageState(false),
                                                     m_readData(false), m_rtpQueue(), frame_number(0),
                                                     first_frame_number(-1) {
}
|
|
/**
 * Drain and free every queued RTP buffer.
 *
 * Fix: the original popped the queue without releasing the returned
 * frameBuffInfo, leaking both the struct and its payload on every element.
 */
BASICGB28181::FFmpegDecoderJPG::~FFmpegDecoderJPG() {
    while (m_rtpQueue.count_queue()) {
        frameBuffInfo *info = m_rtpQueue.popNotWait();
        if (info != nullptr) {
            delete[] info->buff;   // payload allocated with new[] in pushInfo()
            delete info;
        }
    }
}
|
|
/**
 * Copy one incoming RTP payload into the decoder's queue.
 *
 * @param data    payload bytes (deep-copied; caller may reuse its buffer)
 * @param datalen payload length in bytes, must be > 0
 * @param camIdx  camera identifier, stored with the buffer
 * @return false when the decoder is not running or the payload is invalid,
 *         true once the copy has been queued.
 */
bool BASICGB28181::FFmpegDecoderJPG::pushInfo(unsigned char *data, int datalen, const std::string &camIdx) {
    TryCath(
        if (!m_running) {
            ERR(" m_running is false");
            return false;
        }
        // Guard before memcpy: a null source or non-positive length would be
        // undefined behaviour (original copied unconditionally).
        if (data == nullptr || datalen <= 0) {
            ERR(" invalid payload, datalen is " << datalen);
            return false;
        }
#ifdef TestCode
        DBG(camIdx << " dataLen is " << datalen);
#endif
        // Deep-copy the payload; ownership passes to the queue consumer
        // (read_data / the destructor free it).
        frameBuffInfo *info = new frameBuffInfo();
        info->buff = new unsigned char[datalen];
        info->buffLen = datalen;
        info->camIdx = camIdx;
        memcpy(info->buff, data, datalen);

#ifdef TestCode
        DBG(" m_rtpQueue.push before ");
#endif
        m_rtpQueue.push(info);
#ifdef TestCode
        DBG(" m_rtpQueue.push after ");
#endif
    );
    return true;
}
|
|
/**
 * AVIO read callback: fill `buf` with exactly `bufsize` bytes assembled from
 * queued RTP payloads (registered via avio_alloc_context in BareFlowDecoderThd).
 *
 * @param opaque  the FFmpegDecoderJPG instance
 * @param buf     destination buffer supplied by FFmpeg
 * @param bufsize number of bytes FFmpeg asks for
 * @return always bufsize; blocks in m_rtpQueue.pop() until data arrives.
 */
int BASICGB28181::FFmpegDecoderJPG::read_data(void *opaque, uint8_t *buf, int bufsize) {
#ifdef TestCode
    ClockTimer cl("read_data");
#endif
    FFmpegDecoderJPG *fFmpegDecoderJPG = (FFmpegDecoderJPG *) opaque;
    int len = bufsize;   // bytes still to be filled into buf
    int diff = 0;
    do {
        // Blocks until pushInfo() has queued at least one payload.
        frameBuffInfo *buffinfo = fFmpegDecoderJPG->m_rtpQueue.pop();
        diff = len - buffinfo->buffLen;
        // printf("bufsize is :%ld,len is :%ld, datalen:%d \n", bufsize, len, buffinfo->buffLen);
        // Payload larger than the space remaining in buf:
        if (diff < 0) {
            // Copy what fits, push the remainder back to the queue front so
            // the next call continues with it, and remember the split.
            memcpy(buf + bufsize - len, buffinfo->buff, len);

            frameBuffInfo *info = new frameBuffInfo();
            info->buffLen = buffinfo->buffLen - len;
            info->buff = new uint8_t[buffinfo->buffLen - len]{};
            memcpy(info->buff, buffinfo->buff + len, buffinfo->buffLen - len);

            fFmpegDecoderJPG->m_rtpQueue.push_front_one(info);
            fFmpegDecoderJPG->m_PackageState = true;   // a split package is pending
        } else if (diff == 0) {
            // Payload fits exactly.
            memcpy(buf + bufsize - len, buffinfo->buff, buffinfo->buffLen);
            fFmpegDecoderJPG->m_PackageState = false;
        } else if (diff > 0) {
            // Payload smaller than the remaining space.
            memcpy(buf + bufsize - len, buffinfo->buff, buffinfo->buffLen);
            len = len - buffinfo->buffLen;   // bytes still missing
            // Zero-fill the rest and hand the buffer to the decoder right
            // away instead of waiting for more data to arrive.
            memset(buf + bufsize - len, 0, len);
            diff = 0;
            fFmpegDecoderJPG->m_PackageState = false;
        }
        delete[] buffinfo->buff;
        delete buffinfo;
    } while (diff > 0);
    // Signal the decoder loop that fresh input was consumed.
    fFmpegDecoderJPG->m_readData = true;
    return bufsize;
}
|
|
/***
|
* 解码线程
|
* @param p_this
|
*/
|
void BASICGB28181::FFmpegDecoderJPG::BareFlowDecoderThd(FFmpegDecoderJPG *p_this) {
|
DBG(p_this->m_camIdx << " BareFlowDecoderThd ok ... gpuIdx is " << p_this->m_gpuIdx);
|
p_this->m_running = true;
|
// av_register_all();
|
// avformat_network_init();
|
p_this->ic = avformat_alloc_context();
|
|
p_this->iobuffer = (unsigned char *) av_malloc(p_this->m_buf_size);
|
p_this->avio = avio_alloc_context(p_this->iobuffer, p_this->m_buf_size, 0, p_this, p_this->read_data, NULL, NULL);
|
p_this->ic->pb = p_this->avio;
|
|
int err = av_probe_input_buffer(p_this->ic->pb, &p_this->ic->iformat, nullptr, nullptr, 0, p_this->m_buf_size);
|
int err1 = avformat_open_input(&p_this->ic, "", NULL, NULL);
|
// int err2 = avformat_find_stream_info(ic, nullptr);
|
int err2 = avformat_find_stream_info(p_this->ic, NULL);
|
int vi = -1;
|
for (int i = 0; i < p_this->ic->nb_streams; ++i) {
|
if (p_this->ic->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
|
vi = i;
|
break;
|
}
|
}
|
p_this->stream = p_this->ic->streams[vi];
|
p_this->video_st = p_this->stream;
|
p_this->ctx = avcodec_alloc_context3(nullptr);
|
int err3 = avcodec_parameters_to_context(p_this->ctx, p_this->stream->codecpar);
|
|
p_this->codec = avcodec_find_decoder(p_this->ctx->codec_id);
|
//是否启用GPU
|
if (p_this->m_gpuIdx >= 0) {
|
if (p_this->codec != NULL) {
|
char cuvidName[40] = {0};
|
sprintf(cuvidName, "%s_cuvid", p_this->codec->name);
|
if (!strcmp(p_this->codec->name, "h264") || !strcmp(p_this->codec->name, "h265") ||
|
!strcmp(p_this->codec->name, "hevc")) {
|
p_this->codec_cuvid = avcodec_find_decoder_by_name(cuvidName);
|
if (p_this->codec_cuvid != NULL) {
|
p_this->codec = p_this->codec_cuvid;
|
} else {
|
// return false;
|
ERR("codec_cuvid is NULL");
|
}
|
}
|
}
|
}
|
int err4 = avcodec_open2(p_this->ctx, p_this->codec, nullptr);
|
|
av_init_packet(&p_this->pkt);
|
|
p_this->frame = av_frame_alloc();
|
unsigned int usleepTime = (1000 / p_this->m_fps) - 12;
|
usleepTime *= 1000;
|
DBG(" before while <<usleepTime is " << usleepTime);
|
while (p_this->m_running) {
|
#ifdef TestCode
|
ClockTimer Test("while time");
|
#endif
|
int err5 = av_read_frame(p_this->ic, &p_this->pkt);
|
//# todo save package
|
p_this->frame_number++;
|
|
int err6 = avcodec_send_packet(p_this->ctx, &p_this->pkt);
|
// av_packet_unref(&pkt);
|
int err7 = avcodec_receive_frame(p_this->ctx, p_this->frame);
|
if ((err7 == AVERROR(EAGAIN)) || (err5 < 0) || (err6 < 0)) {
|
ERR(" error << err7:" << err7 << " err5: " << err5 << " err6: " << err6);
|
usleep(40000);
|
continue;
|
}
|
//DBG("GotPicture "<<m_camId<<":"<<frame_number);
|
//放在此处是因为之前放在前面,收到的帧不完成
|
p_this->SaveToPacketVector(p_this->pkt);
|
p_this->CheckSave();
|
|
// BASICGB28181::avframe_to_cvmat(frame).copyTo(p_this->m_image);
|
p_this->m_image = std::move(BASICGB28181::avframe_to_cvmat(p_this->frame));
|
|
#ifdef TestCode
|
{
|
// TestCode 测试代码保存图片到本地
|
ClockTimer cl("TestCode");
|
std::string strNewTime2 = AppUtil::getTimeUSecString();
|
cv::putText(p_this->m_image, strNewTime2, cv::Point(408, 540), cv::HersheyFonts::FONT_HERSHEY_PLAIN,
|
5, cv::Scalar(255, 255, 0), 2);
|
std::thread test([&](cv::Mat img, std::string strThing) {
|
try {
|
std::string strNewTime = "tmpDec/";
|
strNewTime.append(p_this->m_camIdx + "_").append(strThing).append(".jpg");
|
// cv::imwrite(strNewTime, p_this->m_image);
|
} catch (std::exception ex) {
|
ERR(ex.what());
|
}
|
}, p_this->m_image, strNewTime2);
|
test.detach();
|
}
|
#endif
|
|
//#todo send to other thd
|
#ifdef TestCode
|
DBG("emitSigal(\"DecoderImageOK\") begin");
|
#endif
|
//触发信号
|
gSignalLock.emitSigal(p_this->m_camIdx + "DecoderImageOK");
|
//#ifdef TestCode
|
// DBG("emitSigal(\"DecoderImageOK\") after");
|
//#endif
|
|
DBG("emitSigal(\"DecoderImageOK\") after");
|
DBG("p_this->m_camIdx is " << p_this->m_camIdx << " queue size is " << p_this->m_rtpQueue.count_queue());
|
|
#ifdef TestCode
|
{
|
ClockTimer cl("waitTime");
|
int loop = 0;
|
//#TODO
|
// while ((loop++ < 3000) && !(p_this->m_readData)) {
|
// usleep(10);
|
// }
|
|
usleep(30000);
|
DBG("p_this->m_readData is " << p_this->m_readData << " loop is " << loop << " queue size is "
|
<< p_this->m_rtpQueue.count_queue());
|
p_this->m_readData = false;
|
// usleep(12000);
|
}
|
#else
|
usleep(usleepTime);
|
#endif
|
|
}
|
DBG(" after while ");
|
av_frame_free(&p_this->frame);
|
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::startThd(const std::string &camIdx, const int &fps, const int &gpuIdx) {
|
TryCath(
|
DBG(camIdx << " FFmpegDecoderJPG startThd ... gpuIdx is " << gpuIdx);
|
m_gpuIdx = gpuIdx;
|
m_fps = fps;
|
if (gpuIdx >= 0) {
|
setenv("CUDA_VISIBLE_DEVICES", std::to_string(gpuIdx).c_str(), 0);
|
}
|
m_camIdx = camIdx;
|
std::thread t_BareFlowDecoder(BareFlowDecoderThd, this);
|
t_BareFlowDecoder.detach();
|
);
|
return true;
|
}
|
|
// Request decoder shutdown: clearing m_running makes the while-loop in
// BareFlowDecoderThd exit on its next iteration (the thread is detached,
// so this does not wait for it).
bool BASICGB28181::FFmpegDecoderJPG::stopThd() {
    TryCath(
        DBG(m_camIdx << " FFmpegDecoderJPG stopThd ... " << m_camIdx);
        m_running = false;
    );
    return true;
}
|
|
// Whether the decoder thread is active (flag is set by BareFlowDecoderThd on
// entry and cleared by stopThd()).
bool BASICGB28181::FFmpegDecoderJPG::getRunning() {
    return m_running;
}
|
|
// Returns the most recently decoded frame. Note: cv::Mat copies share the
// underlying pixel buffer (ref-counted shallow copy), and m_image is
// reassigned by the decoder thread without visible locking — callers that
// need a stable snapshot should clone() the result.
cv::Mat BASICGB28181::FFmpegDecoderJPG::getImage() {
    return m_image;
}
|
|
std::string BASICGB28181::FFmpegDecoderJPG::GetImageName() {
|
ImageName_s_t st;
|
st.m_camId = this->m_camIdx;
|
st.m_frameId = this->m_frameIndex;
|
st.m_timeStamp = AppUtil::GetTimeWithHyphen();
|
return st.toString();
|
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::SaveVideoByImageName(const std::string &strPath, const std::string &imageName) {
|
DBG(" strPath is " << strPath << " imageName " << imageName);
|
ImageName_s_t imgName_s = ImageName_s_t::fromString(imageName);
|
if (!imgName_s.Valid()) {
|
ERR("Image Name Valid " << imageName);
|
return false;
|
}
|
|
m_videoPath = strPath;
|
if (m_recordState == STOP_RECORD) {
|
m_recordState = RECORDING_VIDEO;
|
m_startFrameId = m_endFrameId = imgName_s.m_frameId;
|
} else {
|
if (imgName_s.m_frameId > m_endFrameId) {
|
m_endFrameId = imgName_s.m_frameId;
|
}
|
}
|
|
if (!m_packetsVec.empty()) {
|
if (imgName_s.m_frameId < m_packetsVec[0].m_frameId) {
|
ERR("Save Video Failed: PackageFirstID: " << m_packetsVec[0].m_frameId << " ImageId: "
|
<< imgName_s.m_frameId);
|
}
|
}
|
return true;
|
}
|
|
/**
 * Configure minimum/maximum recorded-video length, given in seconds and
 * converted to frame counts at an assumed 25 fps.
 *
 * @return false for invalid ranges (negative values, or min >= max).
 *
 * Fix: the original condition `minSeconds < 0 || maxSecond < 0 &&
 * minSeconds >= maxSecond` bound `&&` tighter than `||`, so inverted ranges
 * such as (min=10, max=5) were accepted.
 */
bool BASICGB28181::FFmpegDecoderJPG::SetMinMaxVideoSeconds(const int minSeconds, const int maxSecond) {
    if (minSeconds < 0 || maxSecond < 0 || minSeconds >= maxSecond) {
        return false;
    }
    m_minVideoFrameCount = minSeconds * 25;   // 25 frames per second assumed
    m_maxVideoFrameCount = maxSecond * 25;
    return true;
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::CleanToFrameId(int64_t lastFrameId) {
|
std::lock_guard<std::mutex> lock(g_mutex);
|
if (RECORDING_VIDEO == m_recordState) {
|
if (!m_packetsVec.empty()) {
|
auto iter = m_packetsVec.begin();
|
while (iter->m_frameId < lastFrameId) {
|
INFO("DropFrame: " << iter->m_frameId);
|
delete iter->m_packet.data;
|
iter = m_packetsVec.erase(iter);
|
}
|
}
|
}
|
return true;
|
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::CleanOneKeyFrameOneRange() {
|
std::lock_guard<std::mutex> lock(g_mutex);
|
if (!m_packetsVec.empty() && STOP_RECORD == m_recordState) {
|
auto firstFrame = m_packetsVec[0];
|
//视频的最短长度有问题,可以考虑修改此处 m_minVideoFrameCount
|
if ((m_last_I_FrameId - firstFrame.m_frameId > m_minVideoFrameCount / 2)) {
|
auto iter = m_packetsVec.begin();
|
delete iter->m_packet.data;
|
iter = m_packetsVec.erase(iter);
|
while (!(iter->m_packet.flags & AV_PKT_FLAG_KEY)) {
|
// INFO("DropFrame: " << iter->m_frameId);
|
delete iter->m_packet.data;
|
iter = m_packetsVec.erase(iter);
|
}
|
}
|
}
|
return true;
|
}
|
|
/**
 * Mux every buffered packet with frame id < lastFrameId into a new file at
 * `path`. Timestamps are rebased to the first buffered packet so the output
 * starts at t=0. Holds g_mutex for the whole write.
 * NOTE(review): assumes m_packetsVec[0] is a key frame — confirm; otherwise
 * the written video begins with undecodable frames.
 */
bool BASICGB28181::FFmpegDecoderJPG::SaveVideo(std::string path, int64_t lastFrameId) {
    std::lock_guard<std::mutex> lock(g_mutex);
    INFO("SaveVideo: " << path);
    if (!m_packetsVec.empty()) {
        startWrite(path.c_str());
        // pts/dts of the first buffered packet become the zero point.
        int64_t firstKeyFramePts = m_packetsVec[0].m_packet.pts;
        int64_t firstKeyFrameDts = m_packetsVec[0].m_packet.dts;
        for (const auto &item:m_packetsVec) {
            if (item.m_frameId < lastFrameId) {
                DBG("item.m_frameId < lastFrameId " << item.m_frameId << " " << lastFrameId);
                // Rescale this packet's timestamps into the output stream's
                // time base before writing.
                conversion(const_cast<AVPacket *> (&item.m_packet), firstKeyFramePts, firstKeyFrameDts, video_st);
                av_write_frame(m_pOutFmtCtx, &item.m_packet);
            } else {
                // Packets are ordered by frame id; nothing further to write.
                DBG("item.m_frameId > lastFrameId " << item.m_frameId << " " << lastFrameId);
                break;
            }
        }
        stopWrite();
    }
    return true;
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::HandleSave() {
|
if (m_recordState == RECORDING_VIDEO) {
|
auto firstFrame = m_packetsVec[0];
|
VideoName_s_t st;
|
st.m_camId = m_camIdx;
|
st.m_timeStamp = AppUtil::GetTimeWithHyphen();
|
st.m_startFrameId = firstFrame.m_frameId;
|
st.m_endFrameId = m_last_I_FrameId - 1;
|
//结尾留的已经足够,并且没有新的帧需要录像
|
if (m_last_I_FrameId - m_endFrameId > m_minVideoFrameCount / 2) {
|
// INFO("LastIFrameID: " << m_last_I_FrameId << " FirstFrameID: " << st.m_startFrameId << " m_endFrameID: "
|
// << m_endFrameId << " MinVideoFrameCount :" << m_minVideoFrameCount);
|
m_startFrameId = m_endFrameId = -1;
|
|
SaveVideo(m_videoPath + st.ToVideoName(), m_last_I_FrameId);
|
CleanToFrameId(m_last_I_FrameId);
|
m_recordState = STOP_RECORD;
|
} else {
|
//缓冲区中已经有太多的帧了,并且剩余的在缓冲队列的m_last_I_FrameId之后还有需要录像的帧
|
if (m_endFrameId - firstFrame.m_frameId > m_maxVideoFrameCount) {
|
// INFO("FirstFrameID: " << firstFrame.m_frameId << " m_endFrameID: " << m_endFrameId
|
// << " MinVideoFrameCount :" << m_maxVideoFrameCount);
|
m_startFrameId = m_last_I_FrameId;
|
SaveVideo(m_videoPath + st.ToVideoName(), m_last_I_FrameId);
|
CleanToFrameId(m_last_I_FrameId);
|
}
|
}
|
}
|
return true;
|
}
|
|
bool BASICGB28181::FFmpegDecoderJPG::CheckSave() {
|
if (!m_packetsVec.empty()) {
|
if (RECORDING_VIDEO == m_recordState) {
|
HandleSave();
|
return true;
|
}
|
return CleanOneKeyFrameOneRange();
|
}
|
}
|
|
/**
 * Deep-copy an AVPacket into the rolling packet buffer and track the most
 * recent key-frame id.
 *
 * The copy is allocated as uint64_t[] (8-byte aligned) and oversized by
 * FF_INPUT_BUFFER_PADDING_SIZE so FFmpeg readers may overread safely.
 * Fix: FFmpeg requires that padding to be zeroed; the original left it
 * uninitialized.
 */
void BASICGB28181::FFmpegDecoderJPG::SaveToPacketVector(AVPacket &packet) {
    AVPacket newPacket(packet);   // shallow copy of the metadata fields
    size_t words = (packet.size + FF_INPUT_BUFFER_PADDING_SIZE) / sizeof(uint64_t) + 1;
    newPacket.data = reinterpret_cast<uint8_t *>(new uint64_t[words]);
    memcpy(newPacket.data, packet.data, packet.size);
    // Zero everything past the payload, including the padding area.
    memset(newPacket.data + packet.size, 0, words * sizeof(uint64_t) - packet.size);
    m_frameIndex++;
    m_packetsVec.push_back({m_frameIndex, newPacket});
    if (newPacket.flags & AV_PKT_FLAG_KEY) {
        DBG("newPacket.flags & AV_PKT_FLAG_KEY ");
        m_last_I_FrameId = m_frameIndex;
    }
}
|
|
/**
 * Open an output container at `filename` and create one video stream whose
 * codec parameters mirror the input stream (video_st), then write the
 * container header.
 *
 * Uses the legacy AVStream::codec API — keep consistent with the FFmpeg
 * version this project builds against.
 *
 * @return 0 on success, -1 on any failure (no input stream, context
 *         allocation or file open error).
 */
int BASICGB28181::FFmpegDecoderJPG::startWrite(const char *filename) {
    if (video_st == nullptr) {
        printf("video_st instream is null");
        return -1;
    }
    // Guess the output format from the file name extension.
    int ret = avformat_alloc_output_context2(&m_pOutFmtCtx, NULL, NULL, filename);
    if (ret < 0) {
        fprintf(stderr, "avformat_alloc_output_context2 failed, errorCode: %d\n", AVERROR(ret));
        return -1;
    }
    /*
     * since all input files are supposed to be identical (framerate, dimension, color format, ...)
     * we can safely set output codec values from first input file
     */
    m_pOutVideo_stream = avformat_new_stream(m_pOutFmtCtx, NULL);
    {
        // Mirror the input stream's codec parameters onto the output stream.
        AVCodecContext *c;
        c = m_pOutVideo_stream->codec;
        c->bit_rate = 400000;
        c->codec_id = video_st->codec->codec_id;
        c->codec_type = video_st->codec->codec_type;
        c->time_base.num = video_st->time_base.num;
        c->time_base.den = video_st->time_base.den;
        fprintf(stderr, "time_base.num = %d time_base.den = %d\n", c->time_base.num, c->time_base.den);
        c->width = video_st->codec->width;
        c->height = video_st->codec->height;
        c->pix_fmt = video_st->codec->pix_fmt;
        printf("%d %d %d", c->width, c->height, c->pix_fmt);
        c->flags = video_st->codec->flags;
        // Formats like MP4 need codec extradata in the container header.
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
        c->me_range = video_st->codec->me_range;
        c->max_qdiff = video_st->codec->max_qdiff;

        c->qmin = video_st->codec->qmin;
        c->qmax = video_st->codec->qmax;

        c->qcompress = video_st->codec->qcompress;
    }
    ret = avio_open(&m_pOutFmtCtx->pb, filename, AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "could not find stream info, errorCode: %d\n", AVERROR(ret));
        return -1;
    }

    DBG(" avformat_write_header " << avformat_write_header(m_pOutFmtCtx, NULL));

    // Reset the per-file write state used by the conversion/write path.
    m_bstartWrite = true;
    m_bFirstKeyFrame = true;
    m_nFirstKeyDts = 0;
    m_nFirstKeyPts = 0;

    return 0;
}
|
|
int BASICGB28181::FFmpegDecoderJPG::stopWrite() {
|
if (m_pOutFmtCtx == nullptr) return -1;
|
av_write_trailer(m_pOutFmtCtx);
|
avio_close(m_pOutFmtCtx->pb);
|
avcodec_close(m_pOutFmtCtx->streams[0]->codec);
|
av_freep(&m_pOutFmtCtx->streams[0]->codec);
|
av_freep(&m_pOutFmtCtx->streams[0]);
|
|
av_free(m_pOutFmtCtx);
|
m_pOutFmtCtx = nullptr;
|
m_bstartWrite = false;
|
return 0;
|
}
|
|
|
void BASICGB28181::FFmpegDecoderJPG::conversion(void *packet, const long int &firstKeyPts, const long int &firstKeyDts,
|
void *inVideoStream) {
|
if ((packet != nullptr) && (inVideoStream != nullptr)) {
|
AVStream *inStream = (AVStream *) inVideoStream;
|
AVPacket *pkg = static_cast<AVPacket *>(packet);
|
// static int a = 0;
|
// pkg->dts = a++;
|
// pkg->pts = a;
|
pkg->pts -= firstKeyPts;
|
pkg->dts -= firstKeyDts;
|
pkg->pts = av_rescale_q_rnd(pkg->pts, inStream->time_base,
|
m_pOutVideo_stream->time_base,
|
(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
|
pkg->dts = av_rescale_q_rnd(pkg->dts, inStream->time_base,
|
m_pOutVideo_stream->time_base,
|
(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
|
pkg->duration = av_rescale_q(pkg->duration, inStream->time_base,
|
m_pOutVideo_stream->time_base);
|
pkg->pos = -1;
|
}
|
}
|