Merge branch 'master' of ssh://192.168.5.5:29418/valib/goffmpeg
# Conflicts:
# csrc/wrapper.cpp
# csrc/wrapper.hpp
# goffmpeg.go
| | |
| | | *.app |
| | | |
| | | /build |
| | | build* |
| | | .idea |
| | | .vscode |
| | |
| | | |
| | | cuda_add_library(${BIN} SHARED ${FFMPEG_LIST} ${CUDA_LIST}) |
| | | |
| | | target_link_libraries(${BIN} ${LINK_LIB} numa nppig nppicc nppc -lz pthread dl rtspclient StreamParse) |
| | | target_link_libraries(${BIN} ${LINK_LIB} -lz pthread dl rtspclient StreamParse) |
| | |
| | | char * c_ffmpeg_get_gb28181_pic(const char *rtspUrl, int *retDataLen); |
| | | void c_ffmepg_use_cpu(const cffmpeg h); |
| | | /////////passive api |
| | | void c_ffmpeg_open_recorder(const cffmpeg h); |
| | | void c_ffmpeg_set_record_duration(const cffmpeg h, const int min, const int max); |
| | | void c_ffmpeg_build_recorder(const cffmpeg h, const char*id, const char *dir, const int64_t fid, int mind, int maxd, int audio); |
| | | void c_ffmpeg_fire_recorder(const cffmpeg h, const char*sid, const int64_t id); |
| | |
| | | |
| | | void c_ffmpeg_build_decoder(const cffmpeg h); |
| | | void* c_ffmpeg_get_pic_decoder(const cffmpeg h, int *wid, int *hei, int *format, int *length, int64_t *id); |
| | | void c_ffmpeg_close_stream(const cffmpeg h); |
| | | void* c_ffmpeg_get_avpacket(const cffmpeg h, int *size, int *key); |
| | | |
| | | //////decoder |
| | | void* c_ffmpeg_decode(const char *file, const int gb, int *wid, int *hei); |
| | | |
| | | // pic encoder |
| | | void *c_ffmpeg_create_encoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi); |
| | | void *c_ffmpeg_create_encoder(const int w, const int h, const int fps, const int br, const int pix_fmt, const int scale_flag, const int gi); |
| | | void c_ffmpeg_destroy_encoder(void *h); |
| | | int c_ffmpeg_encode(void *hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key); |
| | | |
| | | // conv cpu |
| | | void *c_ffmpeg_create_conv(const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag); |
| | | void c_ffmpeg_destroy_conv(void *h); |
| | | void *c_ffmpeg_conv(void *h, uint8_t *in); |
| | | |
| | | #ifdef __cplusplus |
| | | } |
| | |
| | | |
| | | |
| | | //////passive api |
| | | void c_ffmpeg_open_recorder(const cffmpeg h){ |
| | | Wrapper *s = (Wrapper*)h; |
| | | s->OpenRecorder(); |
| | | } |
| | | void c_ffmpeg_set_record_duration(const cffmpeg h, const int min, const int max){ |
| | | Wrapper *s = (Wrapper*)h; |
| | | s->SetRecMinCacheTime(min); |
| | |
| | | |
| | | void c_ffmpeg_build_decoder(const cffmpeg h){ |
| | | Wrapper *s = (Wrapper*)h; |
| | | s->BuildDecoder(); |
| | | s->OpenDecoder(); |
| | | } |
| | | |
| | | void* c_ffmpeg_get_pic_decoder(const cffmpeg h, int *wid, int *hei, int *format, int *length, int64_t *id){ |
| | |
| | | return data; |
| | | } |
| | | |
| | | void c_ffmpeg_close_stream(const cffmpeg h){ |
| | | Wrapper *s = (Wrapper*)h; |
| | | s->CloseStream(); |
| | | } |
| | | |
| | | void* c_ffmpeg_get_avpacket(const cffmpeg h, int *size, int *key){ |
| | | Wrapper *s = (Wrapper*)h; |
| | | unsigned char *data = NULL; |
| | |
| | | return data; |
| | | } |
| | | |
| | | /////////////////////test |
| | | void* c_ffmpeg_decode(const char *file, const int gb, int *wid, int *hei){ |
| | | return Decode(file, gb, wid, hei); |
| | | } |
| | | |
| | | // pic encoder |
| | | void *c_ffmpeg_create_encoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi){ |
| | | return CreateEncoder(w, h, fps, br, scale_flag, gi); |
| | | void *c_ffmpeg_create_encoder(const int w, const int h, const int fps, const int br, const int pix_fmt, const int scale_flag, const int gi){ |
| | | return CreateEncoder(w, h, fps, br, pix_fmt, scale_flag, gi); |
| | | } |
| | | |
| | | void c_ffmpeg_destroy_encoder(void *h){ |
| | |
| | | |
| | | int c_ffmpeg_encode(void *hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key){ |
| | | return Encode(hdl, in, w, h, out, size, key); |
| | | } |
| | | |
| | | void *c_ffmpeg_create_conv(const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag){ |
| | | return CreateConvertor(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flag); |
| | | } |
| | | |
| | | void *c_ffmpeg_conv(void *h, uint8_t *in){ |
| | | return Convert(h, in); |
| | | } |
| | | |
| | | void c_ffmpeg_destroy_conv(void *h){ |
| | | DestoryConvertor(h); |
| | | } |
| | |
| | | return suitable_gpu; |
| | | } |
| | | |
| | | int getGPUPrior(const int need, const int reserved, const int lastChoice){ |
| | | nvGpuInfo_t gpu_info; |
| | | |
| | | int ret = get_gpu_info(&gpu_info); |
| | | if(!ret){ |
| | | if (gpu_info.device_count == 0) return -1; |
| | | |
| | | int suitable_gpu = -1; |
| | | int mem_idle = need; |
| | | for(int i = 0; i < gpu_info.device_count; i++){ |
| | | if (i != lastChoice){ |
| | | int mem_free = gpu_info.devices[i].memory_free >> 20; |
| | | if(mem_free > mem_idle){ |
| | | mem_idle = mem_free; |
| | | suitable_gpu = i; |
| | | } |
| | | } |
| | | } |
| | | if (suitable_gpu != -1){ |
| | | return suitable_gpu; |
| | | }else{ |
| | | if (gpu_info.device_count <= lastChoice) return -1; |
| | | int mem_free = (gpu_info.devices[lastChoice].memory_free >> 20) - reserved; |
| | | if(mem_free > need){ |
| | | return lastChoice; |
| | | } |
| | | } |
| | | } |
| | | return -1; |
| | | } |
| | | |
| | | int getGPU(const int need){ |
| | | nvGpuInfo_t gpu_buf; |
| | | |
| | |
| | | |
| | | namespace gpu{ |
| | | int getGPU(const int need); |
| | | int getGPUPrior(const int need, const int reserved, const int lastChoice); |
| | | } |
| | | |
| | | #endif |
| | |
| | | return NULL; |
| | | } |
| | | |
| | | if(!scale_->scaleFrame(in, pic_->getAVFrame())){ |
| | | return NULL; |
| | | } |
| | | |
| | | return av_frame_clone(pic_->getAVFrame()); |
| | | uint8_t *out = convert2Data(in); |
| | | AVFrame *frm = NULL; |
| | | if (out){ |
| | | frm = fillFrame(out, scale_->dstW_, scale_->dstH_, scale_->dstFmt_); |
| | | } |
| | | free(out); |
| | | return frm; |
| | | } |
| | | |
| | | ///////////////////////////////////////////////////////////////// |
| | |
| | | bool PicData::init_AVPicture(){ |
| | | data_size_ = avpicture_get_size((AVPixelFormat)pix_fmt_, |
| | | width_, height_); |
| | | if(data_size_ < 0){ |
| | | if(data_size_ <= 0){ |
| | | logIt("avpicture_get_size error"); |
| | | return false; |
| | | } |
| | |
| | | |
| | | FormatIn::~FormatIn() |
| | | { |
| | | if(ctx_){ |
| | | if (!(ctx_->flags & AVFMT_FLAG_CUSTOM_IO)){ |
| | | avformat_close_input(&ctx_); |
| | | }else{ |
| | | avformat_free_context(ctx_); |
| | | } |
| | | logIt("free format in"); |
| | | if(dec_ctx_){ |
| | | avcodec_close(dec_ctx_); |
| | | avcodec_free_context(&dec_ctx_); |
| | | dec_ctx_ = NULL; |
| | | } |
| | | |
| | | if(ctx_){ |
| | | avformat_close_input(&ctx_); |
| | | ctx_ = NULL; |
| | | if(dec_ctx_){ |
| | | avcodec_close(dec_ctx_); |
| | | dec_ctx_ = NULL; |
| | | } |
| | | |
| | | } |
| | | |
| | | if (handle_gb28181){ |
| | | delete handle_gb28181; |
| | | } |
| | | if(read_io_buff_){ |
| | | av_free(read_io_buff_); |
| | | read_io_buff_ = NULL; |
| | | } |
| | | |
| | | if(io_ctx_){ |
| | | av_freep(&io_ctx_->buffer); |
| | | avio_context_free(&io_ctx_); |
| | | io_ctx_ = NULL; |
| | | } |
| | |
| | | |
| | | ctx_->pb = io_ctx_; |
| | | |
| | | auto err = av_probe_input_buffer(ctx_->pb, &ctx_->iformat, NULL, NULL, 0, 0); |
| | | if(err != 0){ |
| | | logIt("open with custom io prob input buffer error:%d err: %s\n", err, getAVErrorDesc(err).c_str()); |
| | | return -1; |
| | | } |
| | | // auto err = av_probe_input_buffer(ctx_->pb, &ctx_->iformat, NULL, NULL, 0, 0); |
| | | // if(err != 0){ |
| | | // logIt("open with custom io prob input buffer error:%d err: %s\n", err, getAVErrorDesc(err).c_str()); |
| | | // return -1; |
| | | // } |
| | | |
| | | return 0; |
| | | } |
| | |
| | | }else if(in->avg_frame_rate.num >=1 && in->avg_frame_rate.den >= 1){ |
| | | fps_ = av_q2d(in->avg_frame_rate); |
| | | } |
| | | logIt("in stream video fps %f, time_base: %d : %d", fps_, in->time_base.num, in->time_base.den); |
| | | logIt("in stream video fps %f, time_base: %d : %d, size: %dx%d", fps_, in->time_base.num, in->time_base.den, in->codecpar->width, in->codecpar->height); |
| | | } |
| | | if (type == AVMEDIA_TYPE_AUDIO){ |
| | | auto in = ctx_->streams[i]; |
| | |
| | | for (int i = 0; i < 2; ++i) |
| | | { |
| | | if(hw_accl_){ |
| | | idle_gpu = gpu::getGPU(100); |
| | | idle_gpu = gpu::getGPUPrior(300, 1024, 0); |
| | | if(idle_gpu < 0){ |
| | | logIt("NO GPU RESOURCE TO DECODE"); |
| | | hw_accl_ = false; |
| | | continue; |
| | | } |
| | | if(codecpar->codec_id == AV_CODEC_ID_H264){ |
| | | dec = avcodec_find_decoder_by_name("h264_cuvid"); |
| | | }else if(codecpar->codec_id == AV_CODEC_ID_H265){ |
| | | dec = avcodec_find_decoder_by_name("hevc_cuvid"); |
| | | } |
| | | |
| | | std::string codec_name(avcodec_get_name(codecpar->codec_id)); |
| | | codec_name += "_cuvid"; |
| | | dec = avcodec_find_decoder_by_name(codec_name.c_str()); |
| | | |
| | | if(!dec){ |
| | | hw_accl_ = false; |
| | | continue; |
| | |
| | | AV_CODEC_ID_DXV, |
| | | AV_CODEC_ID_SCREENPRESSO, |
| | | AV_CODEC_ID_RSCC, |
| | | AV_CODEC_ID_AVS2, |
| | | |
| | | AV_CODEC_ID_Y41P = 0x8000, |
| | | AV_CODEC_ID_AVRP, |
| | |
| | | AV_CODEC_ID_SVG, |
| | | AV_CODEC_ID_GDV, |
| | | AV_CODEC_ID_FITS, |
| | | AV_CODEC_ID_IMM4, |
| | | AV_CODEC_ID_PROSUMER, |
| | | AV_CODEC_ID_MWSC, |
| | | AV_CODEC_ID_WCMV, |
| | | AV_CODEC_ID_RASC, |
| | | AV_CODEC_ID_HYMT, |
| | | AV_CODEC_ID_ARBC, |
| | | AV_CODEC_ID_AGM, |
| | | AV_CODEC_ID_LSCR, |
| | | AV_CODEC_ID_VP4, |
| | | |
| | | /* various PCM "codecs" */ |
| | | AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs |
| | |
| | | AV_CODEC_ID_PCM_S64BE, |
| | | AV_CODEC_ID_PCM_F16LE, |
| | | AV_CODEC_ID_PCM_F24LE, |
| | | AV_CODEC_ID_PCM_VIDC, |
| | | |
| | | /* various ADPCM codecs */ |
| | | AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, |
| | |
| | | AV_CODEC_ID_ADPCM_AICA, |
| | | AV_CODEC_ID_ADPCM_IMA_DAT4, |
| | | AV_CODEC_ID_ADPCM_MTAF, |
| | | AV_CODEC_ID_ADPCM_AGM, |
| | | |
| | | /* AMR */ |
| | | AV_CODEC_ID_AMR_NB = 0x12000, |
| | |
| | | AV_CODEC_ID_APTX, |
| | | AV_CODEC_ID_APTX_HD, |
| | | AV_CODEC_ID_SBC, |
| | | AV_CODEC_ID_ATRAC9, |
| | | AV_CODEC_ID_HCOM, |
| | | |
| | | /* subtitle codecs */ |
| | | AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. |
| | |
| | | AV_CODEC_ID_PJS, |
| | | AV_CODEC_ID_ASS, |
| | | AV_CODEC_ID_HDMV_TEXT_SUBTITLE, |
| | | AV_CODEC_ID_TTML, |
| | | AV_CODEC_ID_ARIB_CAPTION, |
| | | |
| | | /* other specific kind of codecs (generally used for attachments) */ |
| | | AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. |
| | |
| | | * Use qpel MC. |
| | | */ |
| | | #define AV_CODEC_FLAG_QPEL (1 << 4) |
| | | /** |
| | | * Don't output frames whose parameters differ from first |
| | | * decoded frame in stream. |
| | | */ |
| | | #define AV_CODEC_FLAG_DROPCHANGED (1 << 5) |
| | | /** |
| | | * Use internal 2pass ratecontrol in first pass mode. |
| | | */ |
| | |
| | | #define AV_CODEC_CAP_HYBRID (1 << 19) |
| | | |
| | | /** |
| | | * This codec takes the reordered_opaque field from input AVFrames |
| | | * and returns it in the corresponding field in AVCodecContext after |
| | | * encoding. |
| | | */ |
| | | #define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20) |
| | | |
| | | /** |
| | | * Pan Scan area. |
| | | * This specifies the area which should be displayed. |
| | | * Note there may be multiple such areas for one frame. |
| | |
| | | * Maximum bitrate of the stream, in bits per second. |
| | | * Zero if unknown or unspecified. |
| | | */ |
| | | #if FF_API_UNSANITIZED_BITRATES |
| | | int max_bitrate; |
| | | #else |
| | | int64_t max_bitrate; |
| | | #endif |
| | | /** |
| | | * Minimum bitrate of the stream, in bits per second. |
| | | * Zero if unknown or unspecified. |
| | | */ |
| | | #if FF_API_UNSANITIZED_BITRATES |
| | | int min_bitrate; |
| | | #else |
| | | int64_t min_bitrate; |
| | | #endif |
| | | /** |
| | | * Average bitrate of the stream, in bits per second. |
| | | * Zero if unknown or unspecified. |
| | | */ |
| | | #if FF_API_UNSANITIZED_BITRATES |
| | | int avg_bitrate; |
| | | #else |
| | | int64_t avg_bitrate; |
| | | #endif |
| | | |
| | | /** |
| | | * The size of the buffer to which the ratecontrol is applied, in bits. |
| | |
| | | AV_PKT_DATA_METADATA_UPDATE, |
| | | |
| | | /** |
| | | * MPEGTS stream ID, this is required to pass the stream ID |
| | | * MPEGTS stream ID as uint8_t, this is required to pass the stream ID |
| | | * information from the demuxer to the corresponding muxer. |
| | | */ |
| | | AV_PKT_DATA_MPEGTS_STREAM_ID, |
| | |
| | | * The format is not part of ABI, use av_encryption_info_* methods to access. |
| | | */ |
| | | AV_PKT_DATA_ENCRYPTION_INFO, |
| | | |
| | | /** |
| | | * Active Format Description data consisting of a single byte as specified |
| | | * in ETSI TS 101 154 using AVActiveFormatDescription enum. |
| | | */ |
| | | AV_PKT_DATA_AFD, |
| | | |
| | | /** |
| | | * The number of side data types. |
| | |
| | | * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger |
| | | * than extradata_size to avoid problems if it is read with the bitstream reader. |
| | | * The bytewise contents of extradata must not depend on the architecture or CPU endianness. |
| | | * Must be allocated with the av_malloc() family of functions. |
| | | * - encoding: Set/allocated/freed by libavcodec. |
| | | * - decoding: Set/allocated/freed by user. |
| | | */ |
| | |
| | | |
| | | /** |
| | | * custom intra quantization matrix |
| | | * - encoding: Set by user, can be NULL. |
| | | * - decoding: Set by libavcodec. |
| | | * Must be allocated with the av_malloc() family of functions, and will be freed in |
| | | * avcodec_free_context(). |
| | | * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL. |
| | | * - decoding: Set/allocated/freed by libavcodec. |
| | | */ |
| | | uint16_t *intra_matrix; |
| | | |
| | | /** |
| | | * custom inter quantization matrix |
| | | * - encoding: Set by user, can be NULL. |
| | | * - decoding: Set by libavcodec. |
| | | * Must be allocated with the av_malloc() family of functions, and will be freed in |
| | | * avcodec_free_context(). |
| | | * - encoding: Set/allocated by user, freed by libavcodec. Can be NULL. |
| | | * - decoding: Set/allocated/freed by libavcodec. |
| | | */ |
| | | uint16_t *inter_matrix; |
| | | |
| | |
| | | /** |
| | | * opaque 64-bit number (generally a PTS) that will be reordered and |
| | | * output in AVFrame.reordered_opaque |
| | | * - encoding: unused |
| | | * - encoding: Set by libavcodec to the reordered_opaque of the input |
| | | * frame corresponding to the last returned packet. Only |
| | | * supported by encoders with the |
| | | * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability. |
| | | * - decoding: Set by user. |
| | | */ |
| | | int64_t reordered_opaque; |
| | |
| | | #define FF_PROFILE_MJPEG_JPEG_LS 0xf7 |
| | | |
| | | #define FF_PROFILE_SBC_MSBC 1 |
| | | |
| | | #define FF_PROFILE_PRORES_PROXY 0 |
| | | #define FF_PROFILE_PRORES_LT 1 |
| | | #define FF_PROFILE_PRORES_STANDARD 2 |
| | | #define FF_PROFILE_PRORES_HQ 3 |
| | | #define FF_PROFILE_PRORES_4444 4 |
| | | #define FF_PROFILE_PRORES_XQ 5 |
| | | |
| | | #define FF_PROFILE_ARIB_PROFILE_A 0 |
| | | #define FF_PROFILE_ARIB_PROFILE_C 1 |
| | | |
| | | /** |
| | | * level |
| | |
| | | * used as reference pictures). |
| | | */ |
| | | int extra_hw_frames; |
| | | |
| | | /** |
| | | * The percentage of damaged samples to discard a frame. |
| | | * |
| | | * - decoding: set by user |
| | | * - encoding: unused |
| | | */ |
| | | int discard_damaged_percentage; |
| | | } AVCodecContext; |
| | | |
| | | #if FF_API_CODEC_GET_SET |
| | |
| | | * Initialize a reference-counted packet from av_malloc()ed data. |
| | | * |
| | | * @param pkt packet to be initialized. This function will set the data, size, |
| | | * buf and destruct fields, all others are left untouched. |
| | | * and buf fields, all others are left untouched. |
| | | * @param data Data allocated by av_malloc() to be used as packet data. If this |
| | | * function returns successfully, the data is owned by the underlying AVBuffer. |
| | | * The caller may not access the data through other means. |
| | |
| | | * AVERROR_EOF: the decoder has been fully flushed, and there will be |
| | | * no more output frames |
| | | * AVERROR(EINVAL): codec not opened, or it is an encoder |
| | | * AVERROR_INPUT_CHANGED: current decoded frame has changed parameters |
| | | * with respect to first decoded frame. Applicable |
| | | * when flag AV_CODEC_FLAG_DROPCHANGED is set. |
| | | * other negative values: legitimate decoding errors |
| | | */ |
| | | int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); |
| | |
| | | int (*init)(AVBSFContext *ctx); |
| | | int (*filter)(AVBSFContext *ctx, AVPacket *pkt); |
| | | void (*close)(AVBSFContext *ctx); |
| | | void (*flush)(AVBSFContext *ctx); |
| | | } AVBitStreamFilter; |
| | | |
| | | #if FF_API_OLD_BSF |
| | |
| | | int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); |
| | | |
| | | /** |
| | | * Reset the internal bitstream filter state / flush internal buffers. |
| | | */ |
| | | void av_bsf_flush(AVBSFContext *ctx); |
| | | |
| | | /** |
| | | * Free a bitstream filter context and everything associated with it; write NULL |
| | | * into the supplied pointer. |
| | | */ |
| | |
| | | */ |
| | | int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); |
| | | |
| | | /** |
| | | * Release a MediaCodec buffer and render it at the given time to the surface |
| | | * that is associated with the decoder. The timestamp must be within one second |
| | | * of the current java/lang/System#nanoTime() (which is implemented using |
| | | * CLOCK_MONOTONIC on Android). See the Android MediaCodec documentation |
| | | * of android/media/MediaCodec#releaseOutputBuffer(int,long) for more details. |
| | | * |
| | | * @param buffer the buffer to render |
| | | * @param time timestamp in nanoseconds of when to render the buffer |
| | | * @return 0 on success, < 0 otherwise |
| | | */ |
| | | int av_mediacodec_render_buffer_at_time(AVMediaCodecBuffer *buffer, int64_t time); |
| | | |
| | | #endif /* AVCODEC_MEDIACODEC_H */ |
| | |
| | | #include "libavutil/version.h" |
| | | |
| | | #define LIBAVCODEC_VERSION_MAJOR 58 |
| | | #define LIBAVCODEC_VERSION_MINOR 18 |
| | | #define LIBAVCODEC_VERSION_MINOR 54 |
| | | #define LIBAVCODEC_VERSION_MICRO 100 |
| | | |
| | | #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ |
| | |
| | | #ifndef FF_API_NEXT |
| | | #define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_UNSANITIZED_BITRATES |
| | | #define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59) |
| | | #endif |
| | | |
| | | |
| | | #endif /* AVCODEC_VERSION_H */ |
| | |
| | | * the end of stream, when it can contain less than nb_samples. |
| | | * |
| | | * @return The return codes have the same meaning as for |
| | | * av_buffersink_get_samples(). |
| | | * av_buffersink_get_frame(). |
| | | * |
| | | * @warning do not mix this function with av_buffersink_get_frame(). Use only one or |
| | | * the other with a single sink, not both. |
| | |
| | | AVBufferRef *hw_frames_ctx; |
| | | |
| | | /** |
| | | * Audio only, the audio sampling rate in samples per secon. |
| | | * Audio only, the audio sampling rate in samples per second. |
| | | */ |
| | | int sample_rate; |
| | | |
| | |
| | | #include "libavutil/version.h" |
| | | |
| | | #define LIBAVFILTER_VERSION_MAJOR 7 |
| | | #define LIBAVFILTER_VERSION_MINOR 16 |
| | | #define LIBAVFILTER_VERSION_MINOR 57 |
| | | #define LIBAVFILTER_VERSION_MICRO 100 |
| | | |
| | | |
| | | #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ |
| | | LIBAVFILTER_VERSION_MINOR, \ |
| | | LIBAVFILTER_VERSION_MICRO) |
| | |
| | | * into component streams, and the reverse process of muxing - writing supplied |
| | | * data in a specified container format. It also has an @ref lavf_io |
| | | * "I/O module" which supports a number of protocols for accessing the data (e.g. |
| | | * file, tcp, http and others). Before using lavf, you need to call |
| | | * av_register_all() to register all compiled muxers, demuxers and protocols. |
| | | * file, tcp, http and others). |
| | | * Unless you are absolutely sure you won't use libavformat's network |
| | | * capabilities, you should also call avformat_network_init(). |
| | | * |
| | | * A supported input format is described by an AVInputFormat struct, conversely |
| | | * an output format is described by AVOutputFormat. You can iterate over all |
| | | * registered input/output formats using the av_iformat_next() / |
| | | * av_oformat_next() functions. The protocols layer is not part of the public |
| | | * API, so you can only get the names of supported protocols with the |
| | | * avio_enum_protocols() function. |
| | | * input/output formats using the av_demuxer_iterate / av_muxer_iterate() functions. |
| | | * The protocols layer is not part of the public API, so you can only get the names |
| | | * of supported protocols with the avio_enum_protocols() function. |
| | | * |
| | | * Main lavf structure used for both muxing and demuxing is AVFormatContext, |
| | | * which exports all information about the file being read or written. As with |
| | |
| | | * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a |
| | | * static storage somewhere inside the demuxer and the packet is only valid |
| | | * until the next av_read_frame() call or closing the file. If the caller |
| | | * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy |
| | | * of it. |
| | | * requires a longer lifetime, av_packet_make_refcounted() will ensure that |
| | | * the data is reference counted, copying the data if necessary. |
| | | * In both cases, the packet must be freed with av_packet_unref() when it is no |
| | | * longer needed. |
| | | * |
| | |
| | | * New public fields should be added right above. |
| | | ***************************************************************** |
| | | */ |
| | | struct AVOutputFormat *next; |
| | | /** |
| | | * The ff_const59 define is not part of the public API and will |
| | | * be removed without further warning. |
| | | */ |
| | | #if FF_API_AVIOFORMAT |
| | | #define ff_const59 |
| | | #else |
| | | #define ff_const59 const |
| | | #endif |
| | | ff_const59 struct AVOutputFormat *next; |
| | | /** |
| | | * size of private data so that it can be allocated in the wrapper |
| | | */ |
| | |
| | | |
| | | /** |
| | | * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, |
| | | * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, |
| | | * AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, |
| | | * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. |
| | | */ |
| | | int flags; |
| | |
| | | * New public fields should be added right above. |
| | | ***************************************************************** |
| | | */ |
| | | struct AVInputFormat *next; |
| | | ff_const59 struct AVInputFormat *next; |
| | | |
| | | /** |
| | | * Raw demuxers store their codec ID here. |
| | |
| | | * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes |
| | | * big so you do not have to check for that unless you need more. |
| | | */ |
| | | int (*read_probe)(AVProbeData *); |
| | | int (*read_probe)(const AVProbeData *); |
| | | |
| | | /** |
| | | * Read the format header and initialize the AVFormatContext |
| | |
| | | #define AV_DISPOSITION_DESCRIPTIONS 0x20000 |
| | | #define AV_DISPOSITION_METADATA 0x40000 |
| | | #define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts) |
| | | #define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts) |
| | | |
| | | /** |
| | | * Options for behavior on timestamp wrap detection. |
| | |
| | | */ |
| | | int stream_identifier; |
| | | |
| | | /** |
| | | * Details of the MPEG-TS program which created this stream. |
| | | */ |
| | | int program_num; |
| | | int pmt_version; |
| | | int pmt_stream_idx; |
| | | |
| | | int64_t interleaver_chunk_size; |
| | | int64_t interleaver_chunk_duration; |
| | | |
| | |
| | | int program_num; |
| | | int pmt_pid; |
| | | int pcr_pid; |
| | | int pmt_version; |
| | | |
| | | /***************************************************************** |
| | | * All fields below this line are not part of the public API. They |
| | |
| | | * |
| | | * Demuxing only, set by avformat_open_input(). |
| | | */ |
| | | struct AVInputFormat *iformat; |
| | | ff_const59 struct AVInputFormat *iformat; |
| | | |
| | | /** |
| | | * The output container format. |
| | | * |
| | | * Muxing only, must be set by the caller before avformat_write_header(). |
| | | */ |
| | | struct AVOutputFormat *oformat; |
| | | ff_const59 struct AVOutputFormat *oformat; |
| | | |
| | | /** |
| | | * Format private data. This is an AVOptions-enabled struct |
| | |
| | | * This flag is mainly intended for testing. |
| | | */ |
| | | #define AVFMT_FLAG_BITEXACT 0x0400 |
| | | #define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload |
| | | #if FF_API_LAVF_MP4A_LATM |
| | | #define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing. |
| | | #endif |
| | | #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) |
| | | #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) |
| | | #if FF_API_LAVF_KEEPSIDE_FLAG |
| | |
| | | * - decoding: set by user |
| | | */ |
| | | int max_streams; |
| | | |
| | | /** |
| | | * Skip duration calcuation in estimate_timings_from_pts. |
| | | * - encoding: unused |
| | | * - decoding: set by user |
| | | */ |
| | | int skip_estimate_duration_from_pts; |
| | | } AVFormatContext; |
| | | |
| | | #if FF_API_FORMAT_GET_SET |
| | |
| | | * @return >= 0 in case of success, a negative AVERROR code in case of |
| | | * failure |
| | | */ |
| | | int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, |
| | | int avformat_alloc_output_context2(AVFormatContext **ctx, ff_const59 AVOutputFormat *oformat, |
| | | const char *format_name, const char *filename); |
| | | |
| | | /** |
| | |
| | | /** |
| | | * Find AVInputFormat based on the short name of the input format. |
| | | */ |
| | | AVInputFormat *av_find_input_format(const char *short_name); |
| | | ff_const59 AVInputFormat *av_find_input_format(const char *short_name); |
| | | |
| | | /** |
| | | * Guess the file format. |
| | |
| | | * @param is_opened Whether the file is already opened; determines whether |
| | | * demuxers with or without AVFMT_NOFILE are probed. |
| | | */ |
| | | AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); |
| | | ff_const59 AVInputFormat *av_probe_input_format(ff_const59 AVProbeData *pd, int is_opened); |
| | | |
| | | /** |
| | | * Guess the file format. |
| | |
| | | * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended |
| | | * to retry with a larger probe buffer. |
| | | */ |
| | | AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); |
| | | ff_const59 AVInputFormat *av_probe_input_format2(ff_const59 AVProbeData *pd, int is_opened, int *score_max); |
| | | |
| | | /** |
| | | * Guess the file format. |
| | |
| | | * demuxers with or without AVFMT_NOFILE are probed. |
| | | * @param score_ret The score of the best detection. |
| | | */ |
| | | AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); |
| | | ff_const59 AVInputFormat *av_probe_input_format3(ff_const59 AVProbeData *pd, int is_opened, int *score_ret); |
| | | |
| | | /** |
| | | * Probe a bytestream to determine the input format. Each time a probe returns |
| | |
| | | * the maximal score is AVPROBE_SCORE_MAX |
| | | * AVERROR code otherwise |
| | | */ |
| | | int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, |
| | | int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt, |
| | | const char *url, void *logctx, |
| | | unsigned int offset, unsigned int max_probe_size); |
| | | |
| | | /** |
| | | * Like av_probe_input_buffer2() but returns 0 on success |
| | | */ |
| | | int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, |
| | | int av_probe_input_buffer(AVIOContext *pb, ff_const59 AVInputFormat **fmt, |
| | | const char *url, void *logctx, |
| | | unsigned int offset, unsigned int max_probe_size); |
| | | |
| | |
| | | * |
| | | * @note If you want to use custom IO, preallocate the format context and set its pb field. |
| | | */ |
| | | int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); |
| | | int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options); |
| | | |
| | | attribute_deprecated |
| | | int av_demuxer_open(AVFormatContext *ic); |
| | |
| | | * @param mime_type if non-NULL checks if mime_type matches with the |
| | | * MIME type of the registered formats |
| | | */ |
| | | AVOutputFormat *av_guess_format(const char *short_name, |
| | | ff_const59 AVOutputFormat *av_guess_format(const char *short_name, |
| | | const char *filename, |
| | | const char *mime_type); |
| | | |
| | | /** |
| | | * Guess the codec ID based upon muxer and filename. |
| | | */ |
| | | enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, |
| | | enum AVCodecID av_guess_codec(ff_const59 AVOutputFormat *fmt, const char *short_name, |
| | | const char *filename, const char *mime_type, |
| | | enum AVMediaType type); |
| | | |
| | |
| | | int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); |
| | | int64_t (*seek)(void *opaque, int64_t offset, int whence); |
| | | int64_t pos; /**< position in the file of the current buffer */ |
| | | int eof_reached; /**< true if eof reached */ |
| | | int eof_reached; /**< true if was unable to read due to error or eof */ |
| | | int write_flag; /**< true if open for writing */ |
| | | int max_packet_size; |
| | | unsigned long checksum; |
| | |
| | | int64_t avio_size(AVIOContext *s); |
| | | |
| | | /** |
| | | * feof() equivalent for AVIOContext. |
| | | * @return non zero if and only if end of file |
| | | * Similar to feof() but also returns nonzero on read errors. |
| | | * @return non zero if and only if at end of file or a read error happened when reading. |
| | | */ |
| | | int avio_feof(AVIOContext *s); |
| | | |
| | |
| | | // Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) |
| | | // Also please add any ticket numbers that you believe might be affected here |
| | | #define LIBAVFORMAT_VERSION_MAJOR 58 |
| | | #define LIBAVFORMAT_VERSION_MINOR 12 |
| | | #define LIBAVFORMAT_VERSION_MINOR 29 |
| | | #define LIBAVFORMAT_VERSION_MICRO 100 |
| | | |
| | | #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ |
| | |
| | | #ifndef FF_API_HLS_WRAP |
| | | #define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_HLS_USE_LOCALTIME |
| | | #define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_LAVF_KEEPSIDE_FLAG |
| | | #define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | |
| | | #ifndef FF_API_NEXT |
| | | #define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_DASH_MIN_SEG_DURATION |
| | | #define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_LAVF_MP4A_LATM |
| | | #define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | #ifndef FF_API_AVIOFORMAT |
| | | #define FF_API_AVIOFORMAT (LIBAVFORMAT_VERSION_MAJOR < 59) |
| | | #endif |
| | | |
| | | |
| | | #ifndef FF_API_R_FRAME_RATE |
| | |
| | | #endif |
| | | |
| | | /** |
| | | * Assert that floating point opperations can be executed. |
| | | * Assert that floating point operations can be executed. |
| | | * |
| | | * This will av_assert0() that the cpu is not in MMX state on X86 |
| | | */ |
| | |
| | | int av_match_list(const char *name, const char *list, char separator); |
| | | |
| | | /** |
| | | * See libc sscanf manual for more information. |
| | | * Locale-independent sscanf implementation. |
| | | */ |
| | | int av_sscanf(const char *string, const char *format, ...); |
| | | |
| | | /** |
| | | * @} |
| | | */ |
| | | |
| | |
| | | * The size of this struct is not part of the public ABI. |
| | | */ |
| | | typedef struct AVEncryptionInfo { |
| | | /** The fourcc encryption scheme. */ |
| | | /** The fourcc encryption scheme, in big-endian byte order. */ |
| | | uint32_t scheme; |
| | | |
| | | /** |
| | |
| | | */ |
| | | uint8_t* data; |
| | | uint32_t data_size; |
| | | |
| | | /** |
| | | * An optional pointer to the next initialization info in the list. |
| | | */ |
| | | struct AVEncryptionInitInfo *next; |
| | | } AVEncryptionInitInfo; |
| | | |
| | | /** |
| | |
| | | * |
| | | * @param subsample_count The number of subsamples. |
| | | * @param key_id_size The number of bytes in the key ID, should be 16. |
| | | * @param key_id_size The number of bytes in the IV, should be 16. |
| | | * @param iv_size The number of bytes in the IV, should be 16. |
| | | * |
| | | * @return The new AVEncryptionInfo structure, or NULL on error. |
| | | */ |
| | |
| | | /* Automatically generated by version.sh, do not manually edit! */ |
| | | #ifndef AVUTIL_FFVERSION_H |
| | | #define AVUTIL_FFVERSION_H |
| | | #define FFMPEG_VERSION "4.0.2" |
| | | #define FFMPEG_VERSION "4.2" |
| | | #endif /* AVUTIL_FFVERSION_H */ |
| | |
| | | * allocated buffer or map it with mmap() when available. |
| | | * In case of success set *bufptr to the read or mmapped buffer, and |
| | | * *size to the size in bytes of the buffer in *bufptr. |
| | | * Unlike mmap this function succeeds with zero sized files, in this |
| | | * case *bufptr will be set to NULL and *size will be set to 0. |
| | | * The returned buffer must be released with av_file_unmap(). |
| | | * |
| | | * @param log_offset loglevel offset used for logging |
| | |
| | | */ |
| | | AV_FRAME_DATA_QP_TABLE_DATA, |
| | | #endif |
| | | |
| | | /** |
| | | * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t |
| | | * where the first uint32_t describes how many (1-3) of the other timecodes are used. |
| | | * The timecode format is described in the av_timecode_get_smpte_from_framenum() |
| | | * function in libavutil/timecode.c. |
| | | */ |
| | | AV_FRAME_DATA_S12M_TIMECODE, |
| | | |
| | | /** |
| | | * HDR dynamic metadata associated with a video frame. The payload is |
| | | * an AVDynamicHDRPlus type and contains information for color |
| | | * volume transform - application 4 of SMPTE 2094-40:2016 standard. |
| | | */ |
| | | AV_FRAME_DATA_DYNAMIC_HDR_PLUS, |
| | | |
| | | /** |
| | | * Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of |
| | | * array element is implied by AVFrameSideData.size / AVRegionOfInterest.self_size. |
| | | */ |
| | | AV_FRAME_DATA_REGIONS_OF_INTEREST, |
| | | }; |
| | | |
| | | enum AVActiveFormatDescription { |
| | |
| | | AVDictionary *metadata; |
| | | AVBufferRef *buf; |
| | | } AVFrameSideData; |
| | | |
| | | /** |
| | | * Structure describing a single Region Of Interest. |
| | | * |
| | | * When multiple regions are defined in a single side-data block, they |
| | | * should be ordered from most to least important - some encoders are only |
| | | * capable of supporting a limited number of distinct regions, so will have |
| | | * to truncate the list. |
| | | * |
| | | * When overlapping regions are defined, the first region containing a given |
| | | * area of the frame applies. |
| | | */ |
| | | typedef struct AVRegionOfInterest { |
| | | /** |
| | | * Must be set to the size of this data structure (that is, |
| | | * sizeof(AVRegionOfInterest)). |
| | | */ |
| | | uint32_t self_size; |
| | | /** |
| | | * Distance in pixels from the top edge of the frame to the top and |
| | | * bottom edges and from the left edge of the frame to the left and |
| | | * right edges of the rectangle defining this region of interest. |
| | | * |
| | | * The constraints on a region are encoder dependent, so the region |
| | | * actually affected may be slightly larger for alignment or other |
| | | * reasons. |
| | | */ |
| | | int top; |
| | | int bottom; |
| | | int left; |
| | | int right; |
| | | /** |
| | | * Quantisation offset. |
| | | * |
| | | * Must be in the range -1 to +1. A value of zero indicates no quality |
| | | * change. A negative value asks for better quality (less quantisation), |
| | | * while a positive value asks for worse quality (greater quantisation). |
| | | * |
| | | * The range is calibrated so that the extreme values indicate the |
| | | * largest possible offset - if the rest of the frame is encoded with the |
| | | * worst possible quality, an offset of -1 indicates that this region |
| | | * should be encoded with the best possible quality anyway. Intermediate |
| | | * values are then interpolated in some codec-dependent way. |
| | | * |
| | | * For example, in 10-bit H.264 the quantisation parameter varies between |
| | | * -12 and 51. A typical qoffset value of -1/10 therefore indicates that |
| | | * this region should be encoded with a QP around one-tenth of the full |
| | | * range better than the rest of the frame. So, if most of the frame |
| | | * were to be encoded with a QP of around 30, this region would get a QP |
| | | * of around 24 (an offset of approximately -1/10 * (51 - -12) = -6.3). |
| | | * An extreme value of -1 would indicate that this region should be |
| | | * encoded with the best possible quality regardless of the treatment of |
| | | * the rest of the frame - that is, should be encoded at a QP of -12. |
| | | */ |
| | | AVRational qoffset; |
| | | } AVRegionOfInterest; |
| | | |
| | | /** |
| | | * This structure describes decoded (raw) audio or video data. |
| | |
| | | * that time, |
| | | * the decoder reorders values as needed and sets AVFrame.reordered_opaque |
| | | * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque |
| | | * @deprecated in favor of pkt_pts |
| | | */ |
| | | int64_t reordered_opaque; |
| | | |
| | |
| | | int decode_error_flags; |
| | | #define FF_DECODE_ERROR_INVALID_BITSTREAM 1 |
| | | #define FF_DECODE_ERROR_MISSING_REFERENCE 2 |
| | | #define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4 |
| | | #define FF_DECODE_ERROR_DECODE_SLICES 8 |
| | | |
| | | /** |
| | | * number of audio channels, only used for audio. |
New file |
| | |
| | | /* |
| | | * Copyright (c) 2018 Mohammad Izadi <moh.izadi at gmail.com> |
| | | * |
| | | * This file is part of FFmpeg. |
| | | * |
| | | * FFmpeg is free software; you can redistribute it and/or |
| | | * modify it under the terms of the GNU Lesser General Public |
| | | * License as published by the Free Software Foundation; either |
| | | * version 2.1 of the License, or (at your option) any later version. |
| | | * |
| | | * FFmpeg is distributed in the hope that it will be useful, |
| | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| | | * Lesser General Public License for more details. |
| | | * |
| | | * You should have received a copy of the GNU Lesser General Public |
| | | * License along with FFmpeg; if not, write to the Free Software |
| | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| | | */ |
| | | |
| | | #ifndef AVUTIL_HDR_DYNAMIC_METADATA_H |
| | | #define AVUTIL_HDR_DYNAMIC_METADATA_H |
| | | |
| | | #include "frame.h" |
| | | #include "rational.h" |
| | | |
| | | /** |
| | | * Option for overlapping elliptical pixel selectors in an image. |
| | | */ |
| | | enum AVHDRPlusOverlapProcessOption { |
| | | AV_HDR_PLUS_OVERLAP_PROCESS_WEIGHTED_AVERAGING = 0, |
| | | AV_HDR_PLUS_OVERLAP_PROCESS_LAYERING = 1, |
| | | }; |
| | | |
| | | /** |
| | | * Represents the percentile at a specific percentage in |
| | | * a distribution. |
| | | */ |
| | | typedef struct AVHDRPlusPercentile { |
| | | /** |
| | | * The percentage value corresponding to a specific percentile linearized |
| | | * RGB value in the processing window in the scene. The value shall be in |
| | | * the range of 0 to100, inclusive. |
| | | */ |
| | | uint8_t percentage; |
| | | |
| | | /** |
| | | * The linearized maxRGB value at a specific percentile in the processing |
| | | * window in the scene. The value shall be in the range of 0 to 1, inclusive |
| | | * and in multiples of 0.00001. |
| | | */ |
| | | AVRational percentile; |
| | | } AVHDRPlusPercentile; |
| | | |
| | | /** |
| | | * Color transform parameters at a processing window in a dynamic metadata for |
| | | * SMPTE 2094-40. |
| | | */ |
| | | typedef struct AVHDRPlusColorTransformParams { |
| | | /** |
| | | * The relative x coordinate of the top left pixel of the processing |
| | | * window. The value shall be in the range of 0 and 1, inclusive and |
| | | * in multiples of 1/(width of Picture - 1). The value 1 corresponds |
| | | * to the absolute coordinate of width of Picture - 1. The value for |
| | | * first processing window shall be 0. |
| | | */ |
| | | AVRational window_upper_left_corner_x; |
| | | |
| | | /** |
| | | * The relative y coordinate of the top left pixel of the processing |
| | | * window. The value shall be in the range of 0 and 1, inclusive and |
| | | * in multiples of 1/(height of Picture - 1). The value 1 corresponds |
| | | * to the absolute coordinate of height of Picture - 1. The value for |
| | | * first processing window shall be 0. |
| | | */ |
| | | AVRational window_upper_left_corner_y; |
| | | |
| | | /** |
| | | * The relative x coordinate of the bottom right pixel of the processing |
| | | * window. The value shall be in the range of 0 and 1, inclusive and |
| | | * in multiples of 1/(width of Picture - 1). The value 1 corresponds |
| | | * to the absolute coordinate of width of Picture - 1. The value for |
| | | * first processing window shall be 1. |
| | | */ |
| | | AVRational window_lower_right_corner_x; |
| | | |
| | | /** |
| | | * The relative y coordinate of the bottom right pixel of the processing |
| | | * window. The value shall be in the range of 0 and 1, inclusive and |
| | | * in multiples of 1/(height of Picture - 1). The value 1 corresponds |
| | | * to the absolute coordinate of height of Picture - 1. The value for |
| | | * first processing window shall be 1. |
| | | */ |
| | | AVRational window_lower_right_corner_y; |
| | | |
| | | /** |
| | | * The x coordinate of the center position of the concentric internal and |
| | | * external ellipses of the elliptical pixel selector in the processing |
| | | * window. The value shall be in the range of 0 to (width of Picture - 1), |
| | | * inclusive and in multiples of 1 pixel. |
| | | */ |
| | | uint16_t center_of_ellipse_x; |
| | | |
| | | /** |
| | | * The y coordinate of the center position of the concentric internal and |
| | | * external ellipses of the elliptical pixel selector in the processing |
| | | * window. The value shall be in the range of 0 to (height of Picture - 1), |
| | | * inclusive and in multiples of 1 pixel. |
| | | */ |
| | | uint16_t center_of_ellipse_y; |
| | | |
| | | /** |
| | | * The clockwise rotation angle in degree of arc with respect to the |
| | | * positive direction of the x-axis of the concentric internal and external |
| | | * ellipses of the elliptical pixel selector in the processing window. The |
| | | * value shall be in the range of 0 to 180, inclusive and in multiples of 1. |
| | | */ |
| | | uint8_t rotation_angle; |
| | | |
| | | /** |
| | | * The semi-major axis value of the internal ellipse of the elliptical pixel |
| | | * selector in amount of pixels in the processing window. The value shall be |
| | | * in the range of 1 to 65535, inclusive and in multiples of 1 pixel. |
| | | */ |
| | | uint16_t semimajor_axis_internal_ellipse; |
| | | |
| | | /** |
| | | * The semi-major axis value of the external ellipse of the elliptical pixel |
| | | * selector in amount of pixels in the processing window. The value |
| | | * shall not be less than semimajor_axis_internal_ellipse of the current |
| | | * processing window. The value shall be in the range of 1 to 65535, |
| | | * inclusive and in multiples of 1 pixel. |
| | | */ |
| | | uint16_t semimajor_axis_external_ellipse; |
| | | |
| | | /** |
| | | * The semi-minor axis value of the external ellipse of the elliptical pixel |
| | | * selector in amount of pixels in the processing window. The value shall be |
| | | * in the range of 1 to 65535, inclusive and in multiples of 1 pixel. |
| | | */ |
| | | uint16_t semiminor_axis_external_ellipse; |
| | | |
| | | /** |
| | | * Overlap process option indicates one of the two methods of combining |
| | | * rendered pixels in the processing window in an image with at least one |
| | | * elliptical pixel selector. For overlapping elliptical pixel selectors |
| | | * in an image, overlap_process_option shall have the same value. |
| | | */ |
| | | enum AVHDRPlusOverlapProcessOption overlap_process_option; |
| | | |
| | | /** |
| | | * The maximum of the color components of linearized RGB values in the |
| | | * processing window in the scene. The values should be in the range of 0 to |
| | | * 1, inclusive and in multiples of 0.00001. maxscl[ 0 ], maxscl[ 1 ], and |
| | | * maxscl[ 2 ] are corresponding to R, G, B color components respectively. |
| | | */ |
| | | AVRational maxscl[3]; |
| | | |
| | | /** |
| | | * The average of linearized maxRGB values in the processing window in the |
| | | * scene. The value should be in the range of 0 to 1, inclusive and in |
| | | * multiples of 0.00001. |
| | | */ |
| | | AVRational average_maxrgb; |
| | | |
| | | /** |
| | | * The number of linearized maxRGB values at given percentiles in the |
| | | * processing window in the scene. The maximum value shall be 15. |
| | | */ |
| | | uint8_t num_distribution_maxrgb_percentiles; |
| | | |
| | | /** |
| | | * The linearized maxRGB values at given percentiles in the |
| | | * processing window in the scene. |
| | | */ |
| | | AVHDRPlusPercentile distribution_maxrgb[15]; |
| | | |
| | | /** |
| | | * The fraction of selected pixels in the image that contains the brightest |
| | | * pixel in the scene. The value shall be in the range of 0 to 1, inclusive |
| | | * and in multiples of 0.001. |
| | | */ |
| | | AVRational fraction_bright_pixels; |
| | | |
| | | /** |
| | | * This flag indicates that the metadata for the tone mapping function in |
| | | * the processing window is present (for value of 1). |
| | | */ |
| | | uint8_t tone_mapping_flag; |
| | | |
| | | /** |
| | | * The x coordinate of the separation point between the linear part and the |
| | | * curved part of the tone mapping function. The value shall be in the range |
| | | * of 0 to 1, excluding 0 and in multiples of 1/4095. |
| | | */ |
| | | AVRational knee_point_x; |
| | | |
| | | /** |
| | | * The y coordinate of the separation point between the linear part and the |
| | | * curved part of the tone mapping function. The value shall be in the range |
| | | * of 0 to 1, excluding 0 and in multiples of 1/4095. |
| | | */ |
| | | AVRational knee_point_y; |
| | | |
| | | /** |
| | | * The number of the intermediate anchor parameters of the tone mapping |
| | | * function in the processing window. The maximum value shall be 15. |
| | | */ |
| | | uint8_t num_bezier_curve_anchors; |
| | | |
| | | /** |
| | | * The intermediate anchor parameters of the tone mapping function in the |
| | | * processing window in the scene. The values should be in the range of 0 |
| | | * to 1, inclusive and in multiples of 1/1023. |
| | | */ |
| | | AVRational bezier_curve_anchors[15]; |
| | | |
| | | /** |
| | | * This flag shall be equal to 0 in bitstreams conforming to this version of |
| | | * this Specification. Other values are reserved for future use. |
| | | */ |
| | | uint8_t color_saturation_mapping_flag; |
| | | |
| | | /** |
| | | * The color saturation gain in the processing window in the scene. The |
| | | * value shall be in the range of 0 to 63/8, inclusive and in multiples of |
| | | * 1/8. The default value shall be 1. |
| | | */ |
| | | AVRational color_saturation_weight; |
| | | } AVHDRPlusColorTransformParams; |
| | | |
| | | /** |
| | | * This struct represents dynamic metadata for color volume transform - |
| | | * application 4 of SMPTE 2094-40:2016 standard. |
| | | * |
| | | * To be used as payload of a AVFrameSideData or AVPacketSideData with the |
| | | * appropriate type. |
| | | * |
| | | * @note The struct should be allocated with |
| | | * av_dynamic_hdr_plus_alloc() and its size is not a part of |
| | | * the public ABI. |
| | | */ |
| | | typedef struct AVDynamicHDRPlus { |
| | | /** |
| | | * Country code by Rec. ITU-T T.35 Annex A. The value shall be 0xB5. |
| | | */ |
| | | uint8_t itu_t_t35_country_code; |
| | | |
| | | /** |
| | | * Application version in the application defining document in ST-2094 |
| | | * suite. The value shall be set to 0. |
| | | */ |
| | | uint8_t application_version; |
| | | |
| | | /** |
| | | * The number of processing windows. The value shall be in the range |
| | | * of 1 to 3, inclusive. |
| | | */ |
| | | uint8_t num_windows; |
| | | |
| | | /** |
| | | * The color transform parameters for every processing window. |
| | | */ |
| | | AVHDRPlusColorTransformParams params[3]; |
| | | |
| | | /** |
| | | * The nominal maximum display luminance of the targeted system display, |
| | | * in units of 0.0001 candelas per square metre. The value shall be in |
| | | * the range of 0 to 10000, inclusive. |
| | | */ |
| | | AVRational targeted_system_display_maximum_luminance; |
| | | |
| | | /** |
| | | * This flag shall be equal to 0 in bit streams conforming to this version |
| | | * of this Specification. The value 1 is reserved for future use. |
| | | */ |
| | | uint8_t targeted_system_display_actual_peak_luminance_flag; |
| | | |
| | | /** |
| | | * The number of rows in the targeted system_display_actual_peak_luminance |
| | | * array. The value shall be in the range of 2 to 25, inclusive. |
| | | */ |
| | | uint8_t num_rows_targeted_system_display_actual_peak_luminance; |
| | | |
| | | /** |
| | | * The number of columns in the |
| | | * targeted_system_display_actual_peak_luminance array. The value shall be |
| | | * in the range of 2 to 25, inclusive. |
| | | */ |
| | | uint8_t num_cols_targeted_system_display_actual_peak_luminance; |
| | | |
| | | /** |
| | | * The normalized actual peak luminance of the targeted system display. The |
| | | * values should be in the range of 0 to 1, inclusive and in multiples of |
| | | * 1/15. |
| | | */ |
| | | AVRational targeted_system_display_actual_peak_luminance[25][25]; |
| | | |
| | | /** |
| | | * This flag shall be equal to 0 in bitstreams conforming to this version of |
| | | * this Specification. The value 1 is reserved for future use. |
| | | */ |
| | | uint8_t mastering_display_actual_peak_luminance_flag; |
| | | |
| | | /** |
| | | * The number of rows in the mastering_display_actual_peak_luminance array. |
| | | * The value shall be in the range of 2 to 25, inclusive. |
| | | */ |
| | | uint8_t num_rows_mastering_display_actual_peak_luminance; |
| | | |
| | | /** |
| | | * The number of columns in the mastering_display_actual_peak_luminance |
| | | * array. The value shall be in the range of 2 to 25, inclusive. |
| | | */ |
| | | uint8_t num_cols_mastering_display_actual_peak_luminance; |
| | | |
| | | /** |
| | | * The normalized actual peak luminance of the mastering display used for |
| | | * mastering the image essence. The values should be in the range of 0 to 1, |
| | | * inclusive and in multiples of 1/15. |
| | | */ |
| | | AVRational mastering_display_actual_peak_luminance[25][25]; |
| | | } AVDynamicHDRPlus; |
| | | |
| | | /** |
| | | * Allocate an AVDynamicHDRPlus structure and set its fields to |
| | | * default values. The resulting struct can be freed using av_freep(). |
| | | * |
| | | * @return An AVDynamicHDRPlus filled with default values or NULL |
| | | * on failure. |
| | | */ |
| | | AVDynamicHDRPlus *av_dynamic_hdr_plus_alloc(size_t *size); |
| | | |
| | | /** |
| | | * Allocate a complete AVDynamicHDRPlus and add it to the frame. |
| | | * @param frame The frame which side data is added to. |
| | | * |
| | | * @return The AVDynamicHDRPlus structure to be filled by caller or NULL |
| | | * on failure. |
| | | */ |
| | | AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame); |
| | | |
| | | #endif /* AVUTIL_HDR_DYNAMIC_METADATA_H */ |
| | |
| | | */ |
| | | typedef struct AVCUDADeviceContext { |
| | | CUcontext cuda_ctx; |
| | | CUstream stream; |
| | | AVCUDADeviceContextInternal *internal; |
| | | } AVCUDADeviceContext; |
| | | |
| | |
| | | # define AV_WN64A(p, v) AV_WNA(64, p, v) |
| | | #endif |
| | | |
| | | #if AV_HAVE_BIGENDIAN |
| | | # define AV_RLA(s, p) av_bswap##s(AV_RN##s##A(p)) |
| | | # define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v)) |
| | | #else |
| | | # define AV_RLA(s, p) AV_RN##s##A(p) |
| | | # define AV_WLA(s, p, v) AV_WN##s##A(p, v) |
| | | #endif |
| | | |
| | | #ifndef AV_RL64A |
| | | # define AV_RL64A(p) AV_RLA(64, p) |
| | | #endif |
| | | #ifndef AV_WL64A |
| | | # define AV_WL64A(p, v) AV_WLA(64, p, v) |
| | | #endif |
| | | |
| | | /* |
| | | * The AV_COPYxxU macros are suitable for copying data to/from unaligned |
| | | * memory locations. |
New file |
| | |
| | | /* |
| | | * LZO 1x decompression |
| | | * copyright (c) 2006 Reimar Doeffinger |
| | | * |
| | | * This file is part of FFmpeg. |
| | | * |
| | | * FFmpeg is free software; you can redistribute it and/or |
| | | * modify it under the terms of the GNU Lesser General Public |
| | | * License as published by the Free Software Foundation; either |
| | | * version 2.1 of the License, or (at your option) any later version. |
| | | * |
| | | * FFmpeg is distributed in the hope that it will be useful, |
| | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| | | * Lesser General Public License for more details. |
| | | * |
| | | * You should have received a copy of the GNU Lesser General Public |
| | | * License along with FFmpeg; if not, write to the Free Software |
| | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| | | */ |
| | | |
| | | #ifndef AVUTIL_LZO_H |
| | | #define AVUTIL_LZO_H |
| | | |
| | | /** |
| | | * @defgroup lavu_lzo LZO |
| | | * @ingroup lavu_crypto |
| | | * |
| | | * @{ |
| | | */ |
| | | |
| | | #include <stdint.h> |
| | | |
| | | /** @name Error flags returned by av_lzo1x_decode |
| | | * @{ */ |
| | | /// end of the input buffer reached before decoding finished |
| | | #define AV_LZO_INPUT_DEPLETED 1 |
| | | /// decoded data did not fit into output buffer |
| | | #define AV_LZO_OUTPUT_FULL 2 |
| | | /// a reference to previously decoded data was wrong |
| | | #define AV_LZO_INVALID_BACKPTR 4 |
| | | /// a non-specific error in the compressed bitstream |
| | | #define AV_LZO_ERROR 8 |
| | | /** @} */ |
| | | |
| | | #define AV_LZO_INPUT_PADDING 8 |
| | | #define AV_LZO_OUTPUT_PADDING 12 |
| | | |
| | | /** |
| | | * @brief Decodes LZO 1x compressed data. |
| | | * @param out output buffer |
| | | * @param outlen size of output buffer, number of bytes left are returned here |
| | | * @param in input buffer |
| | | * @param inlen size of input buffer, number of bytes left are returned here |
| | | * @return 0 on success, otherwise a combination of the error flags above |
| | | * |
| | | * Make sure all buffers are appropriately padded, in must provide |
| | | * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. |
| | | */ |
| | | int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); |
| | | |
| | | /** |
| | | * @} |
| | | */ |
| | | |
| | | #endif /* AVUTIL_LZO_H */ |
| | |
| | | * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be |
| | | * correctly aligned. |
| | | */ |
| | | av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); |
| | | int av_reallocp_array(void *ptr, size_t nmemb, size_t size); |
| | | |
| | | /** |
| | | * Reallocate the given buffer if it is not large enough, otherwise do nothing. |
| | |
| | | * @endcode |
| | | * |
| | | * @param[in,out] ptr Already allocated buffer, or `NULL` |
| | | * @param[in,out] size Pointer to current size of buffer `ptr`. `*size` is |
| | | * changed to `min_size` in case of success or 0 in |
| | | * case of failure |
| | | * @param[in] min_size New size of buffer `ptr` |
| | | * @param[in,out] size Pointer to the size of buffer `ptr`. `*size` is |
| | | * updated to the new allocated size, in particular 0 |
| | | * in case of failure. |
| | | * @param[in] min_size Desired minimal size of buffer `ptr` |
| | | * @return `ptr` if the buffer is large enough, a pointer to newly reallocated |
| | | * buffer if the buffer was not large enough, or `NULL` in case of |
| | | * error |
| | |
| | | * @param[in,out] ptr Pointer to pointer to an already allocated buffer. |
| | | * `*ptr` will be overwritten with pointer to new |
| | | * buffer on success or `NULL` on failure |
| | | * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is |
| | | * changed to `min_size` in case of success or 0 in |
| | | * case of failure |
| | | * @param[in] min_size New size of buffer `*ptr` |
| | | * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is |
| | | * updated to the new allocated size, in particular 0 |
| | | * in case of failure. |
| | | * @param[in] min_size Desired minimal size of buffer `*ptr` |
| | | * @see av_realloc() |
| | | * @see av_fast_mallocz() |
| | | */ |
| | |
| | | * @param[in,out] ptr Pointer to pointer to an already allocated buffer. |
| | | * `*ptr` will be overwritten with pointer to new |
| | | * buffer on success or `NULL` on failure |
| | | * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is |
| | | * changed to `min_size` in case of success or 0 in |
| | | * case of failure |
| | | * @param[in] min_size New size of buffer `*ptr` |
| | | * @param[in,out] size Pointer to the size of buffer `*ptr`. `*size` is |
| | | * updated to the new allocated size, in particular 0 |
| | | * in case of failure. |
| | | * @param[in] min_size Desired minimal size of buffer `*ptr` |
| | | * @see av_fast_malloc() |
| | | */ |
| | | void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); |
| | |
| | | #define AV_OPT_FLAG_READONLY 128 |
| | | #define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering |
| | | #define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering |
| | | #define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information |
| | | //FIXME think about enc-audio, ... style flags |
| | | |
| | | /** |
| | |
| | | |
| | | /** |
| | | * The pixel format has an alpha channel. This is set on all formats that |
| | | * support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can |
| | | * carry alpha as part of the palette. Details are explained in the |
| | | * AVPixelFormat enum, and are also encoded in the corresponding |
| | | * AVPixFmtDescriptor. |
| | | * |
| | | * The alpha is always straight, never pre-multiplied. |
| | | * support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always |
| | | * straight, never pre-multiplied. |
| | | * |
| | | * If a codec or a filter does not support alpha, it should set all alpha to |
| | | * opaque, or use the equivalent pixel formats without alpha component, e.g. |
| | |
| | | * format writes the values corresponding to the palette |
| | | * component c in data[1] to dst, rather than the palette indexes in |
| | | * data[0]. The behavior is undefined if the format is not paletted. |
| | | * @param dst_element_size size of elements in dst array (2 or 4 byte) |
| | | */ |
| | | void av_read_image_line2(void *dst, const uint8_t *data[4], |
| | | const int linesize[4], const AVPixFmtDescriptor *desc, |
| | | int x, int y, int c, int w, int read_pal_component, |
| | | int dst_element_size); |
| | | |
| | | void av_read_image_line(uint16_t *dst, const uint8_t *data[4], |
| | | const int linesize[4], const AVPixFmtDescriptor *desc, |
| | | int x, int y, int c, int w, int read_pal_component); |
| | |
| | | * @param y the vertical coordinate of the first pixel to write |
| | | * @param w the width of the line to write, that is the number of |
| | | * values to write to the image line |
| | | * @param src_element_size size of elements in src array (2 or 4 byte) |
| | | */ |
| | | void av_write_image_line2(const void *src, uint8_t *data[4], |
| | | const int linesize[4], const AVPixFmtDescriptor *desc, |
| | | int x, int y, int c, int w, int src_element_size); |
| | | |
| | | void av_write_image_line(const uint16_t *src, uint8_t *data[4], |
| | | const int linesize[4], const AVPixFmtDescriptor *desc, |
| | | int x, int y, int c, int w); |
| | |
| | | * This is stored as BGRA on little-endian CPU architectures and ARGB on |
| | | * big-endian CPUs. |
| | | * |
| | | * @note |
| | | * If the resolution is not a multiple of the chroma subsampling factor |
| | | * then the chroma plane resolution must be rounded up. |
| | | * |
| | | * @par |
| | | * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized |
| | | * image data is stored in AVFrame.data[0]. The palette is transported in |
| | |
| | | */ |
| | | AV_PIX_FMT_OPENCL, |
| | | |
| | | AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian |
| | | AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian |
| | | |
| | | AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian |
| | | AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian |
| | | |
| | | AV_PIX_FMT_YUVA422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian |
| | | AV_PIX_FMT_YUVA422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian |
| | | AV_PIX_FMT_YUVA444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian |
| | | AV_PIX_FMT_YUVA444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian |
| | | |
| | | AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) |
| | | AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped |
| | | |
| | | AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions |
| | | }; |
| | | |
| | |
| | | #define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) |
| | | #define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) |
| | | #define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) |
| | | #define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE) |
| | | #define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) |
| | | #define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) |
| | | #define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) |
| | |
| | | #define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE) |
| | | #define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE) |
| | | |
| | | #define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE) |
| | | |
| | | #define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) |
| | | #define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) |
| | | #define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) |
| | | #define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) |
| | | #define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) |
| | | #define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) |
| | | #define AV_PIX_FMT_YUVA422P12 AV_PIX_FMT_NE(YUVA422P12BE, YUVA422P12LE) |
| | | #define AV_PIX_FMT_YUVA444P12 AV_PIX_FMT_NE(YUVA444P12BE, YUVA444P12LE) |
| | | #define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) |
| | | #define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) |
| | | #define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) |
| | |
| | | void (*free_func)(void *msg)); |
| | | |
| | | /** |
| | | * Return the current number of messages in the queue. |
| | | * |
| | | * @return the current number of messages or AVERROR(ENOSYS) if lavu was built |
| | | * without thread support |
| | | */ |
| | | int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq); |
| | | |
| | | /** |
| | | * Flush the message queue |
| | | * |
| | | * This function is mostly equivalent to reading and free-ing every message |
New file |
| | |
| | | /* |
| | | * This file is part of FFmpeg. |
| | | * |
| | | * FFmpeg is free software; you can redistribute it and/or |
| | | * modify it under the terms of the GNU Lesser General Public |
| | | * License as published by the Free Software Foundation; either |
| | | * version 2.1 of the License, or (at your option) any later version. |
| | | * |
| | | * FFmpeg is distributed in the hope that it will be useful, |
| | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| | | * Lesser General Public License for more details. |
| | | * |
| | | * You should have received a copy of the GNU Lesser General Public |
| | | * License along with FFmpeg; if not, write to the Free Software |
| | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
| | | */ |
| | | |
| | | #ifndef AVUTIL_TX_H |
| | | #define AVUTIL_TX_H |
| | | |
| | | #include <stdint.h> |
| | | #include <stddef.h> |
| | | |
| | | typedef struct AVTXContext AVTXContext; |
| | | |
| | | typedef struct AVComplexFloat { |
| | | float re, im; |
| | | } AVComplexFloat; |
| | | |
| | | enum AVTXType { |
| | | /** |
| | | * Standard complex to complex FFT with sample data type AVComplexFloat. |
| | | * Scaling currently unsupported |
| | | */ |
| | | AV_TX_FLOAT_FFT = 0, |
| | | /** |
| | | * Standard MDCT with sample data type of float and a scale type of |
| | | * float. Length is the frame size, not the window size (which is 2x frame) |
| | | */ |
| | | AV_TX_FLOAT_MDCT = 1, |
| | | }; |
| | | |
| | | /** |
| | | * Function pointer to a function to perform the transform. |
| | | * |
| | | * @note Using a different context than the one allocated during av_tx_init() |
| | | * is not allowed. |
| | | * |
| | | * @param s the transform context |
| | | * @param out the output array |
| | | * @param in the input array |
| | | * @param stride the input or output stride (depending on transform direction) |
| | | * in bytes, currently implemented for all MDCT transforms |
| | | */ |
| | | typedef void (*av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride); |
| | | |
| | | /** |
| | | * Initialize a transform context with the given configuration |
| | | * Currently power of two lengths from 4 to 131072 are supported, along with |
| | | * any length decomposable to a power of two and either 3, 5 or 15. |
| | | * |
| | | * @param ctx the context to allocate, will be NULL on error |
| | | * @param tx pointer to the transform function pointer to set |
| | | * @param type type the type of transform |
| | | * @param inv whether to do an inverse or a forward transform |
| | | * @param len the size of the transform in samples |
| | | * @param scale pointer to the value to scale the output if supported by type |
| | | * @param flags currently unused |
| | | * |
| | | * @return 0 on success, negative error code on failure |
| | | */ |
| | | int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, |
| | | int inv, int len, const void *scale, uint64_t flags); |
| | | |
| | | /** |
| | | * Frees a context and sets ctx to NULL, does nothing when ctx == NULL |
| | | */ |
| | | void av_tx_uninit(AVTXContext **ctx); |
| | | |
| | | #endif /* AVUTIL_TX_H */ |
| | |
| | | */ |
| | | |
| | | #define LIBAVUTIL_VERSION_MAJOR 56 |
| | | #define LIBAVUTIL_VERSION_MINOR 14 |
| | | #define LIBAVUTIL_VERSION_MINOR 31 |
| | | #define LIBAVUTIL_VERSION_MICRO 100 |
| | | |
| | | #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ |
| | |
| | | #include "libavutil/version.h" |
| | | |
| | | #define LIBSWSCALE_VERSION_MAJOR 5 |
| | | #define LIBSWSCALE_VERSION_MINOR 1 |
| | | #define LIBSWSCALE_VERSION_MINOR 5 |
| | | #define LIBSWSCALE_VERSION_MICRO 100 |
| | | |
| | | #define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ |
| | |
| | | pthread_mutex_lock(&mtx); |
| | | if (q.empty()) { |
| | | gettimeofday(&now, NULL); |
| | | t.tv_sec = now.tv_sec + 5; |
| | | t.tv_sec = now.tv_sec + 3; |
| | | t.tv_nsec = now.tv_usec * 1000; |
| | | // pthread_cond_wait(&cond, &mtx); |
| | | pthread_cond_timedwait(&cond, &mtx, &t); |
| | |
| | | #define __LIB_RTSP_H__
|
| | |
|
| | |
|
| | | #define RTSP_ERR_OK 0 //成功
|
| | | //错误码
|
| | | #define RTSP_ERR_PARAM -1001 //参数错误
|
| | | #define RTSP_ERR_TIMEOUT -1002 //超时
|
| | | #define RTSP_ERR_OPTIONS -1003 //options 请求失败
|
| | | #define RTSP_ERR_DESCRIBE -1004 //describe请求失败
|
| | | #define RTSP_ERR_SETUP -1005 //setup请求失败
|
| | | #define RTSP_ERR_PLAY -1006 //play请求失败
|
| | | #define RTSP_ERR_PAUSE -1007 //pause请求失败
|
| | | #define RTSP_ERR_TEARDOWN -1008 //teardown请求失败
|
| | | #define RTSP_ERR_NO_MEMORY -1009 //申请内存失败
|
| | | #define RTSP_ERR_CONNECT -1010 //connect失败
|
| | | #define RTSP_ERR_INITPORT -1011 //初始化端口失败
|
| | | #define RTSP_ERR_OK 0 //成功
|
| | | //错误码
|
| | | #define RTSP_ERR_PARAM -1001 //参数错误
|
| | | #define RTSP_ERR_TIMEOUT -1002 //超时
|
| | | #define RTSP_ERR_OPTIONS -1003 //options 请求失败
|
| | | #define RTSP_ERR_DESCRIBE -1004 //describe请求失败
|
| | | #define RTSP_ERR_SETUP -1005 //setup请求失败
|
| | | #define RTSP_ERR_PLAY -1006 //play请求失败
|
| | | #define RTSP_ERR_PAUSE -1007 //pause请求失败
|
| | | #define RTSP_ERR_TEARDOWN -1008 //teardown请求失败
|
| | | #define RTSP_ERR_NO_MEMORY -1009 //申请内存失败
|
| | | #define RTSP_ERR_CONNECT -1010 //connect失败
|
| | | #define RTSP_ERR_INITPORT -1011 //初始化端口失败
|
| | |
|
| | | //码流传输方式
|
| | | //码流传输方式
|
| | | typedef enum
|
| | | {
|
| | | E_STREAM_TRANS_UDP = 1, //UDP传输码流
|
| | | E_STREAM_TRANS_TCPACTIVE = 2, //GB28181 TCP主动 码流传输方式 TcpClient
|
| | | E_STREAM_TRANS_TCPPASSIVE = 3, //GB28181 TCP被动 码流传输方式 TcpServer
|
| | | E_STREAM_TRANS_UDP = 1, //UDP传输码流
|
| | | E_STREAM_TRANS_TCPACTIVE = 2, //GB28181 TCP主动 码流传输方式 TcpClient
|
| | | E_STREAM_TRANS_TCPPASSIVE = 3, //GB28181 TCP被动 码流传输方式 TcpServer
|
| | | }StreamTransType_E;
|
| | |
|
| | | //视频请求类型
|
| | | //视频请求类型
|
| | | typedef enum
|
| | | {
|
| | | E_VIDEO_REQUEST_REALPLAY = 1, //请求实时视频
|
| | | E_VIDEO_REQUEST_PLAYBACK = 2, //请求历史视频
|
| | | E_VIDEO_REQUEST_DOWNLOAD = 3, //历史视频下载
|
| | | E_VIDEO_REQUEST_REALPLAY = 1, //请求实时视频
|
| | | E_VIDEO_REQUEST_PLAYBACK = 2, //请求历史视频
|
| | | E_VIDEO_REQUEST_DOWNLOAD = 3, //历史视频下载
|
| | | }VideoRequestType_E;
|
| | |
|
| | | #define HIS_VIDEO_CTRL_PLAY 1 //点播播放控制
|
| | | #define HIS_VIDEO_CTRL_FAST 2 //点播快放控制 参数范围:1-32倍
|
| | | #define HIS_VIDEO_CTRL_SLOW 3 //点播慢放控制 参数范围:1-32倍
|
| | | #define HIS_VIDEO_CTRL_PAUSE 4 //点播暂停控制
|
| | | #define HIS_VIDEO_CTRL_JUMP 5 //点播跳转控制 参数范围:从开始计算跳转的时间 (时间单位:秒) |
| | | #define HIS_VIDEO_CTRL_PLAY 1 //点播播放控制
|
| | | #define HIS_VIDEO_CTRL_FAST 2 //点播快放控制 参数范围:1-32倍
|
| | | #define HIS_VIDEO_CTRL_SLOW 3 //点播慢放控制 参数范围:1-32倍
|
| | | #define HIS_VIDEO_CTRL_PAUSE 4 //点播暂停控制
|
| | | #define HIS_VIDEO_CTRL_JUMP 5 //点播跳转控制 参数范围:从开始计算跳转的时间 (时间单位:秒) |
| | |
|
| | | // 码流回调的数据类型 |
| | | // 码流回调的数据类型 |
| | | #define GB_VIDEO_STREAM_H264 0
|
| | | #define GB_VIDEO_STREAM_MPEG2 1 // MPEG4
|
| | | #define GB_VIDEO_STREAM_MPEG4 2 // MPEG4
|
| | |
| | | #define GB_VIDEO_STREAM_3GP 4 // 3GP
|
| | | #define GB_VIDEO_STREAM_H265 5 //H265
|
| | |
|
| | | // 码流回调帧数据类型 I帧 P帧 目前只判断是否为I帧
|
| | | // 码流回调帧数据类型 I帧 P帧 目前只判断是否为I帧
|
| | | #define GB_VIDEO_FRAME_I 1
|
| | | #define GB_VIDEO_FRAME_P 2
|
| | |
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | // 函数名:码流回调函数
|
| | | // 描述:
|
| | | // 参数:datatype:码流格式数据类型(H264 H265等) frametype:帧类型(I帧 P帧)
|
| | | // datalen:长度 data:码流数据 userdata:用户指针
|
| | | // 函数名:码流回调函数
|
| | | // 描述:
|
| | | // 参数:datatype:码流格式数据类型(H264 H265等) frametype:帧类型(I帧 P帧)
|
| | | // datalen:长度 data:码流数据 userdata:用户指针
|
| | | //
|
| | | // 返回值:会话句柄
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | // 返回值:会话句柄
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | typedef void (*PlayCallBack2)(int datatype, int frametype, unsigned char *data, unsigned int datalen, long userdata);
|
| | |
|
| | |
| | | #endif
|
| | |
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | // 函数名:RTSPSTREAM_Open
|
| | | // 描述:设置回调函数。
|
| | | // 参数:rtsp地址、 码流回调函数、用户指针
|
| | | // 函数名:RTSPSTREAM_Open
|
| | | // 描述:设置回调函数。
|
| | | // 参数:rtsp地址、 码流回调函数、用户指针
|
| | | //
|
| | | //
|
| | | // 返回值:会话句柄
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | // 返回值:会话句柄
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | long RTSPSTREAM_Open(const char *rtspurl, PlayCallBack2 streamcallback, long userdata);
|
| | |
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | // 函数名:RTSPSTREAM_Contrl
|
| | | // 描述:设置回调函数。
|
| | | // 参数:会话句柄, 控制类型, 控制参数
|
| | | // 函数名:RTSPSTREAM_Contrl
|
| | | // 描述:设置回调函数。
|
| | | // 参数:会话句柄, 控制类型, 控制参数
|
| | | //
|
| | | //
|
| | | // 返回值:
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | // 返回值:
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | long RTSPSTREAM_Contrl(long handle, int ctrltype, double ctrlparam);
|
| | |
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | // 函数名:RTSPSTREAM_Close
|
| | | // 描述:设置回调函数。
|
| | | // 参数:会话句柄
|
| | | // 函数名:RTSPSTREAM_Close
|
| | | // 描述:设置回调函数。
|
| | | // 参数:会话句柄
|
| | | //
|
| | | //
|
| | | // 返回值:
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | // 返回值:
|
| | | // 。
|
| | | // 说明:
|
| | | // 保留原模块接口。
|
| | | ////////////////////////////////////////////////////////////////////////////////
|
| | | long RTSPSTREAM_Close(long handle);
|
| | |
|
| | |
| | | { |
| | | decoder::decoder(ffwrapper::FormatIn *dec) |
| | | :decRef_(dec) |
| | | ,next_idx_(-1) |
| | | {} |
| | | |
| | | decoder::~decoder(){ |
| | | |
| | | std::lock_guard<std::mutex> l(mutex_frm_); |
| | | for(auto i : list_frm_){ |
| | | free(i.data); |
| | | } |
| | | list_frm_.clear(); |
| | | |
| | | std::lock_guard<std::mutex> l(mutex_pkt_); |
| | | list_pkt_.clear(); |
| | | } |
| | | |
| | | int decoder::initDecoder(){ |
| | |
| | | return 0; |
| | | } |
| | | |
| | | int decoder::saveFrame(AVFrame *frame, const int64_t &id){ |
| | | FRM frm; |
| | | frm.width = frame->width; |
| | | frm.height = frame->height; |
| | | frm.format = frame->format; |
| | | frm.id = id; |
| | | frm.data = cvbridge::extractFrame(frame, &frm.length); |
| | | |
| | | std::lock_guard<std::mutex> l(mutex_frm_); |
| | | while(list_frm_.size() > 50){ |
| | | for(int i = 0; i < 12; i++){ |
| | | auto t = list_frm_.front(); |
| | | free(t.data); |
| | | list_frm_.pop_front(); |
| | | } |
| | | } |
| | | if (!frm.data) return 0; |
| | | list_frm_.push_back(frm); |
| | | return list_frm_.size(); |
| | | } |
| | | |
| | | int decoder::SetFrame(const CPacket &pkt){ |
| | | auto data = pkt.data; |
| | | |
| | |
| | | if (initDecoder() != 0) return -30; |
| | | } |
| | | |
| | | AVFrame *frame = av_frame_alloc(); |
| | | AVPacket np(data->getAVPacket()); |
| | | av_copy_packet(&np, &data->getAVPacket()); |
| | | auto ret = decRef_->decode(frame, &np); |
| | | av_packet_unref(&np); |
| | | |
| | | if (ret == 0){ |
| | | saveFrame(frame, pkt.v_id); |
| | | std::lock_guard<std::mutex> l(mutex_pkt_); |
| | | if (data->getAVPacket().flags & AV_PKT_FLAG_KEY){ |
| | | list_pkt_.clear(); |
| | | } |
| | | av_frame_free(&frame); |
| | | return ret; |
| | | list_pkt_.push_back(pkt); |
| | | |
| | | return list_pkt_.size(); |
| | | } |
| | | |
| | | void decoder::GetFrame(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id){ |
| | | |
| | | std::lock_guard<std::mutex> l(mutex_frm_); |
| | | if(list_frm_.empty()){ |
| | | *data = NULL; |
| | | *w = *h = 0; |
| | | *id = -1; |
| | | return; |
| | | AVFrame *frame = NULL; |
| | | |
| | | { |
| | | std::lock_guard<std::mutex> l(mutex_pkt_); |
| | | if (list_pkt_.empty()) return; |
| | | auto check = list_pkt_.front(); |
| | | if (check.id > next_idx_){ |
| | | next_idx_ = -1; |
| | | logIt("decoder new list cpacket"); |
| | | } |
| | | |
| | | for (auto &i : list_pkt_){ |
| | | if (i.id < next_idx_){ |
| | | continue; |
| | | } |
| | | |
| | | *id = i.v_id; |
| | | auto data = i.data; |
| | | |
| | | AVFrame *frm = av_frame_alloc(); |
| | | AVPacket np(data->getAVPacket()); |
| | | av_copy_packet(&np, &data->getAVPacket()); |
| | | auto ret = decRef_->decode(frm, &np); |
| | | av_packet_unref(&np); |
| | | if (ret == 0){ |
| | | next_idx_ = i.id + 1; |
| | | if (frame) {av_frame_free(&frame); frame = NULL;} |
| | | frame = frm; |
| | | } |
| | | } |
| | | } |
| | | auto p = list_frm_.front(); |
| | | list_frm_.pop_front(); |
| | | *data = p.data; |
| | | *id = p.id; |
| | | *w = p.width; |
| | | *h = p.height; |
| | | *format = p.format; |
| | | *length = p.length; |
| | | if (!frame) return; |
| | | |
| | | int pix_fmt = frame->format; |
| | | int width = frame->width; |
| | | int height = frame->height; |
| | | int len = 0; |
| | | |
| | | uint8_t *origin = cvbridge::extractFrame(frame, &len); |
| | | av_frame_free(&frame); |
| | | if (!origin) return; |
| | | |
| | | uint8_t *finale = NULL; |
| | | if (pix_fmt != AV_PIX_FMT_NV12){ |
| | | finale = (uint8_t*)malloc(len); |
| | | |
| | | unsigned char* SrcU = origin + width * height; |
| | | unsigned char* SrcV = SrcU + width * height / 4 ; |
| | | unsigned char* DstU = finale + width * height; |
| | | memcpy(finale, origin, width * height); |
| | | int i = 0; |
| | | for( i = 0 ; i < width * height / 4 ; i++ ){ |
| | | *(DstU++) = *(SrcU++); |
| | | *(DstU++) = *(SrcV++); |
| | | } |
| | | free(origin); |
| | | }else{ |
| | | finale = origin; |
| | | } |
| | | |
| | | *data = finale; |
| | | *w = width; |
| | | *h = height; |
| | | *format = pix_fmt; |
| | | *length = len; |
| | | } |
| | | |
| | | } // namespace cffmpeg_wrap |
| | |
| | | |
| | | namespace cffmpeg_wrap |
| | | { |
| | | typedef struct _frm{ |
| | | uint8_t *data; |
| | | int length; |
| | | int width; |
| | | int height; |
| | | int format; |
| | | int64_t id; |
| | | }FRM; |
| | | |
| | | class decoder |
| | | { |
| | | private: |
| | | |
| | | ffwrapper::FormatIn *decRef_; |
| | | |
| | | std::list<FRM> list_frm_; |
| | | std::mutex mutex_frm_; |
| | | |
| | | std::list<CPacket> list_pkt_; |
| | | std::mutex mutex_pkt_; |
| | | |
| | | int64_t next_idx_; |
| | | |
| | | private: |
| | | int initDecoder(); |
| | | int saveFrame(AVFrame *frame, const int64_t &id); |
| | | public: |
| | | void Start(); |
| | | int SetFrame(const CPacket &pkt); |
| | |
| | | using namespace ffwrapper; |
| | | using namespace cffmpeg_wrap::buz; |
| | | |
| | | static const int cache_time = 6 * 60; |
| | | static const int cache_time = 3 * 60 + 30; |
| | | |
| | | namespace cffmpeg_wrap |
| | | { |
| | |
| | | |
| | | void rec::SetRecMinCacheTime(const int min){ |
| | | // 由于整个流程耗时,补偿time_offset_ |
| | | int fps = 25; |
| | | if (recRef_){ |
| | | fps = recRef_->getFPS(); |
| | | } |
| | | min_cache_len_ += min * fps; |
| | | // int fps = 25; |
| | | // if (recRef_){ |
| | | // fps = recRef_->getFPS(); |
| | | // } |
| | | // if (min_cache_len_ > (cache_time+min) * fps){ |
| | | // return; |
| | | // } |
| | | // min_cache_len_ += min * fps; |
| | | } |
| | | |
| | | int rec::shrinkCache(){ |
| | | //超过最大缓存,丢弃gop |
| | | |
| | | while (list_pkt_.size() > min_cache_len_) { |
| | | list_pkt_.pop_front(); |
| | | while(!list_pkt_.empty()){ |
| | |
| | | ,gb_(0) |
| | | ,cpu_(0) |
| | | ,run_dec_(false) |
| | | ,run_stream_(true) |
| | | ,run_rec_(false) |
| | | ,thread_(nullptr) |
| | | ,stop_stream_(false) |
| | | ,stream_(nullptr) |
| | |
| | | ,gb_(0) |
| | | ,cpu_(0) |
| | | ,run_dec_(false) |
| | | ,run_stream_(true) |
| | | ,run_rec_(false) |
| | | ,thread_(nullptr) |
| | | ,stop_stream_(false) |
| | | ,stream_(nullptr) |
| | |
| | | p.pts = p.dts = AV_NOPTS_VALUE; |
| | | } |
| | | int flag = 0; |
| | | if (stream_) stream_->SetPacket(pkt); |
| | | if (decoder_ && run_dec_) flag = decoder_->SetFrame(pkt); |
| | | if (rec_->Loaded()) rec_->SetPacket(pkt); |
| | | if (run_stream_ && stream_) stream_->SetPacket(pkt); |
| | | if (run_dec_ && decoder_) flag = decoder_->SetFrame(pkt); |
| | | if (run_rec_ && rec_->Loaded()) rec_->SetPacket(pkt); |
| | | |
| | | return flag; |
| | | } |
| | |
| | | int64_t v_id = id; |
| | | int64_t a_id = id; |
| | | |
| | | bool exist = access(input_url_.c_str(), 0) == 0 ? true : false; |
| | | bool exist = access(input_url_.c_str(), 0) == 0; |
| | | |
| | | while(!stop_stream_.load()){ |
| | | auto data(std::make_shared<CodedData>()); |
| | |
| | | } |
| | | |
| | | deinit_worker(); |
| | | if (exist) break; |
| | | } |
| | | } |
| | | |
| | | void Wrapper::SetRecMinCacheTime(const int mind){ |
| | | rec_->SetRecMinCacheTime(mind); |
| | | } |
| | | |
| | | void Wrapper::OpenRecorder(){ |
| | | run_rec_ = true; |
| | | } |
| | | |
| | | void Wrapper::BuildRecorder(const char* id, const char *output, const int64_t &fid, const int mindur, const int maxdur, const bool audio){ |
| | |
| | | [=]{rec_->NewRec(rid.c_str(), dir.c_str(), fid, mindur, maxdur, a);}; |
| | | } |
| | | } |
| | | |
| | | int Wrapper::FireRecorder(const char* sid,const int64_t &id){ |
| | | if (rec_->Loaded()){ |
| | | rec_->FireRecSignal(sid, id); |
| | |
| | | } |
| | | } |
| | | ////////decoder |
| | | void Wrapper::BuildDecoder(){ |
| | | void Wrapper::OpenDecoder(){ |
| | | run_dec_ = true; |
| | | } |
| | | |
| | |
| | | } |
| | | } |
| | | |
| | | void Wrapper::CloseStream(){ |
| | | run_stream_ = false; |
| | | } |
| | | |
| | | void Wrapper::GetPacket(unsigned char **pktData, int *size, int *key){ |
| | | if (stream_){ |
| | | stream_->GetPacket(pktData, size, key); |
| | |
| | | |
| | | // return val: -1 open error; -2, find stream error; -3, converter create |
| | | namespace cffmpeg_wrap{ // start test functions |
| | | uint8_t* Decode(const char *file, const int gb, int *w, int *h){ |
| | | VideoProp prop; |
| | | prop.url_ = file; |
| | | prop.gpu_acc_ = false; |
| | | |
| | | std::unique_ptr<FormatIn> in(new FormatIn(prop.gpuAccl())); |
| | | int flag = -1; |
| | | if (gb){ |
| | | flag = in->openGb28181(file, NULL); |
| | | }else{ |
| | | flag = in->open(file, NULL); |
| | | } |
| | | |
| | | std::unique_ptr<cvbridge> bridge_(nullptr); |
| | | |
| | | if(flag == 0){ |
| | | if(!in->findStreamInfo(NULL)){ |
| | | logIt("yolo can't find video stream\n"); |
| | | *w = *h = -2; |
| | | return NULL; |
| | | } |
| | | auto flag = in->openCodec(NULL); |
| | | if(flag){ |
| | | auto dec_ctx = in->getCodecContext(); |
| | | |
| | | AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24; |
| | | bridge_.reset(new cvbridge( |
| | | dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, |
| | | dec_ctx->width, dec_ctx->height, pix_fmt, SWS_BICUBIC)); |
| | | |
| | | }else{ |
| | | logIt("FormatIn openCodec Failed!"); |
| | | *w = *h = -3; |
| | | return NULL; |
| | | } |
| | | }else{ |
| | | logIt("open %s error", file); |
| | | *w = *h = -1; |
| | | return NULL; |
| | | } |
| | | |
| | | uint8_t *pic = NULL; |
| | | *w = *h = 0; |
| | | |
| | | int tryTime = 0; |
| | | while (tryTime++ < 100){ |
| | | |
| | | auto data(std::make_shared<CodedData>()); |
| | | if (in->readPacket(&data->getAVPacket()) == 0){ |
| | | |
| | | auto frame(std::make_shared<FrameData>()); |
| | | AVFrame *frm = frame->getAVFrame(); |
| | | if(in->decode(frm, &data->getAVPacket()) == 0){ |
| | | *w = frm->width; |
| | | *h = frm->height; |
| | | pic = bridge_->convert2Data(frm); |
| | | break; |
| | | } |
| | | } |
| | | } |
| | | |
| | | return pic; |
| | | } |
| | | /////// for encoder |
| | | typedef struct _PicEncoder{ |
| | | FormatOut *enc; |
| | |
| | | int fps; |
| | | int br; |
| | | int gi; |
| | | int pix_fmt; |
| | | int flag; |
| | | cvbridge *bridge; |
| | | } PicEncoder; |
| | | |
| | | void *CreateEncoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi){ |
| | | void *CreateEncoder(const int w, const int h, const int fps, const int br, |
| | | const int pix_fmt, const int scale_flag, const int gi){ |
| | | |
| | | PicEncoder *e = (PicEncoder*)malloc(sizeof(PicEncoder)); |
| | | e->enc = NULL; |
| | |
| | | e->fps = fps; |
| | | e->br = br; |
| | | e->gi = gi; |
| | | e->pix_fmt = pix_fmt; |
| | | e->flag = scale_flag; |
| | | e->bridge = NULL; |
| | | |
| | |
| | | PicEncoder *e = (PicEncoder*)hdl; |
| | | auto ctx = e->enc->getCodecContext(); |
| | | |
| | | AVPixelFormat pix_fmt = AV_PIX_FMT_BGR24; |
| | | if (e->bridge == NULL){ |
| | | e->bridge = new cvbridge( |
| | | w, h, AV_PIX_FMT_BGR24, |
| | | w, h, e->pix_fmt, |
| | | e->w, e->h, ctx->pix_fmt, e->flag); |
| | | } |
| | | |
| | | AVFrame *frame = cvbridge::fillFrame(in, w, h, pix_fmt); |
| | | AVFrame *bgr_frame = cvbridge::fillFrame(in, w, h, e->pix_fmt); |
| | | AVFrame *frame = e->bridge->convert2Frame(bgr_frame); |
| | | av_frame_free(&bgr_frame); |
| | | |
| | | AVPacket *pkt = av_packet_alloc(); |
| | | |
| | | auto flag = e->enc->encode(pkt, frame); |
| | |
| | | return flag; |
| | | } |
| | | |
| | | /////////////////////////////////////////////////////////// |
| | | typedef struct _conv |
| | | { |
| | | int srcW; |
| | | int srcH; |
| | | int srcF; |
| | | int dstW; |
| | | int dstH; |
| | | cvbridge *b; |
| | | }Conv; |
| | | |
| | | void *CreateConvertor(const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag){ |
| | | |
| | | auto bridge = new cvbridge( |
| | | srcW, srcH, srcFormat, |
| | | dstW, dstH, dstFormat, flag); |
| | | if (!bridge) return NULL; |
| | | |
| | | Conv *c = (Conv*)malloc(sizeof(Conv)); |
| | | c->b = bridge; |
| | | c->dstW = dstW; |
| | | c->dstH = dstH; |
| | | c->srcW = srcW; |
| | | c->srcH = srcH; |
| | | c->srcF = srcFormat; |
| | | |
| | | return c; |
| | | } |
| | | |
| | | uint8_t *Convert(void *h, uint8_t *src){ |
| | | Conv *c = (Conv*)h; |
| | | |
| | | auto b = c->b; |
| | | |
| | | AVFrame *tmp_frm = cvbridge::fillFrame(src, c->srcW, c->srcH, c->srcF); |
| | | if (!tmp_frm) return NULL; |
| | | |
| | | unsigned char *picData = b->convert2Data(tmp_frm); |
| | | |
| | | av_frame_free(&tmp_frm); |
| | | |
| | | return picData; |
| | | } |
| | | |
| | | void DestoryConvertor(void *h){ |
| | | Conv *c = (Conv*)h; |
| | | delete c->b; |
| | | free(c); |
| | | } |
| | | |
| | | int GetGb28181Pic(const char *rtspUrl, char *retData, int *retDataLen){ |
| | | int ret = 0; |
| | | std::string fn = rtspUrl; |
| | |
| | | handle_gb28181->deleteCamera(); |
| | | return ret; |
| | | } |
| | | |
| | | } |
| | | |
| | |
| | | |
| | | int GetFPS(){return fps_;} |
| | | public: //decoder |
| | | void BuildDecoder(); |
| | | void OpenDecoder(); |
| | | void GetPicDecoder(unsigned char **data, int *w, int *h, int *format, int *length, int64_t *id); |
| | | public: // push stream |
| | | void CloseStream(); |
| | | void GetPacket(unsigned char **pktData, int *size, int *key); |
| | | public: // recorder |
| | | void OpenRecorder(); |
| | | private: |
| | | // stream 参数 |
| | | std::string input_url_; |
| | | bool audio_; |
| | | int gb_, cpu_; |
| | | bool run_dec_; |
| | | bool run_stream_; |
| | | bool run_rec_; |
| | | // decoder 参数 |
| | | std::unique_ptr<std::thread> thread_; |
| | | std::atomic_bool stop_stream_; |
| | |
| | | void *CreateEncoder(const int w, const int h, const int fps, const int br, const int scale_flag, const int gi); |
| | | void DestroyEncoder(void *h); |
| | | int Encode(void *hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key); |
| | | |
| | | void *CreateConvertor(const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag); |
| | | uint8_t *Convert(void *h, uint8_t *src); |
| | | void DestoryConvertor(void *h); |
| | | |
| | | int GetGb28181Pic(const char *filename, char *retData, int *retDataLen); |
| | | } |
| | | |
| | |
| | | var srcW, srcH, srcF C.int |
| | | |
| | | p := C.wrap_fn_decoder_pic(unsafe.Pointer(libcffmpeg), h.ffmpeg, &srcW, &srcH, &srcF, &length, &fid) |
| | | if srcW == 0 || srcH == 0 { |
| | | if p == nil || length == 0 || srcW == 0 || srcH == 0 { |
| | | return nil, 0, 0, 0 |
| | | } |
| | | defer C.free(unsafe.Pointer(p)) |
| | | |
| | | return C.GoBytes(p, length), int(srcW), int(srcH), int64(fid) |
| | | } |
| | | |
| | | // GetYUV2 get yuv data |
| | | func (h *GoFFMPEG) GetYUV2() (unsafe.Pointer, []byte, int, int, int64) { |
| | | var fid C.long |
| | | var length C.int |
| | | var srcW, srcH, srcF C.int |
| | | |
| | | p := C.wrap_fn_decoder_pic(unsafe.Pointer(libcffmpeg), h.ffmpeg, &srcW, &srcH, &srcF, &length, &fid) |
| | | if p == nil || length == 0 || srcW == 0 || srcH == 0 { |
| | | return nil, nil, 0, 0, 0 |
| | | } |
| | | const maxLen = 0x7fffffff |
| | | size := int(length) |
| | | data := (*[maxLen]byte)(unsafe.Pointer(p))[:size:size] |
| | | |
| | | return unsafe.Pointer(p), data, int(srcW), int(srcH), int64(fid) |
| | | } |
| | |
| | | } |
| | | |
| | | return &GoEncoder{ |
| | | enc: C.wrap_fn_create_encoder(unsafe.Pointer(libcffmpeg), C.int(w), C.int(h), C.int(fps), C.int(br), C.int(sFlag), C.int(gi)), |
| | | enc: C.wrap_fn_create_encoder(unsafe.Pointer(libcffmpeg), C.int(w), C.int(h), C.int(fps), C.int(br), C.int(DstFormat), C.int(sFlag), C.int(gi)), |
| | | } |
| | | } |
| | | |
| | | // NewEncoderWithPixFmt origin pix_fmt |
| | | func NewEncoderWithPixFmt(w, h, fps, br, pixFmt, sFlag, gi int) *GoEncoder { |
| | | if w <= 0 || h <= 0 { |
| | | return nil |
| | | } |
| | | |
| | | return &GoEncoder{ |
| | | enc: C.wrap_fn_create_encoder(unsafe.Pointer(libcffmpeg), C.int(w), C.int(h), C.int(fps), C.int(br), C.int(pixFmt), C.int(sFlag), C.int(gi)), |
| | | } |
| | | } |
| | | |
| | |
| | | |
| | | var size C.int |
| | | var key C.int |
| | | cin := C.CBytes(in) |
| | | defer C.free(cin) |
| | | |
| | | p := C.wrap_fn_encode(unsafe.Pointer(libcffmpeg), e.enc, cin, C.int(w), C.int(h), &size, &key) |
| | | p := C.wrap_fn_encode(unsafe.Pointer(libcffmpeg), e.enc, unsafe.Pointer(&in[0]), C.int(w), C.int(h), &size, &key) |
| | | defer C.free(p) |
| | | if p != nil && size > 0 { |
| | | b := C.GoBytes(p, size) |
| | |
| | | } |
| | | return nil, 0, false |
| | | } |
| | | |
| | | // Encode2 pic |
| | | func (e *GoEncoder) Encode2(in []byte, w, h int) (unsafe.Pointer, []byte, int, bool) { |
| | | |
| | | var size C.int |
| | | var key C.int |
| | | |
| | | p := C.wrap_fn_encode(unsafe.Pointer(libcffmpeg), e.enc, unsafe.Pointer(&in[0]), C.int(w), C.int(h), &size, &key) |
| | | if p != nil && size > 0 { |
| | | |
| | | isKey := false |
| | | if key > 0 { |
| | | isKey = true |
| | | } |
| | | const maxLen = 0x7fffffff |
| | | length := int(size) |
| | | data := (*[maxLen]byte)(unsafe.Pointer(p))[:length:length] |
| | | |
| | | return p, data, length, isKey |
| | | } |
| | | return nil, nil, 0, false |
| | | } |
| | |
| | | "unsafe" |
| | | ) |
| | | |
| | | const ( |
| | | // ScaleNone self add no scale raw frame data |
| | | ScaleNone = 0 |
| | | // ScaleFastBilinear SWS_FAST_BILINEAR |
| | | ScaleFastBilinear = 1 |
| | | // ScaleBilinear SWS_BILINEAR |
| | | ScaleBilinear = 2 |
| | | // ScaleBicubic SWS_BICUBIC |
| | | ScaleBicubic = 4 |
| | | // ScaleX SWS_X |
| | | ScaleX = 8 |
| | | // ScalePoint SWS_POINT |
| | | ScalePoint = 0x10 |
| | | // ScaleArea SWS_AREA |
| | | ScaleArea = 0x20 |
| | | // ScaleBicublin SWS_BICUBLIN |
| | | ScaleBicublin = 0x40 |
| | | // ScaleGauss SWS_GAUSS |
| | | ScaleGauss = 0x80 |
| | | // ScaleSinc SWS_SINC |
| | | ScaleSinc = 0x100 |
| | | // ScaleLancZos SWS_LANCZOS |
| | | ScaleLancZos = 0x200 |
| | | // ScaleSpline SWS_SPLINE |
| | | ScaleSpline = 0x400 |
| | | ) |
| | | |
| | | // SrcFormat format NV |
| | | const SrcFormat = 23 |
| | | |
| | | // DstFormat format |
| | | const DstFormat = 3 |
| | | |
| | | var libcffmpeg C.libcffmpeg |
| | | |
| | | // InitFFmpeg init ffmepg |
| | |
| | | return int(C.wrap_fn_fps(unsafe.Pointer(libcffmpeg), h.ffmpeg)) |
| | | } |
| | | |
| | | // ReleaseC release c memory |
| | | func ReleaseC(p unsafe.Pointer) { |
| | | if p == nil { |
| | | return |
| | | } |
| | | C.free(unsafe.Pointer(p)) |
| | | } |
| | | |
| | | // GetGBJpg Get GB28181 Jpg |
| | | func GetGBJpg(rtspUrl string) []byte { |
| | | rtsp := C.CString(rtspUrl) |
| | |
| | | |
| | | import "unsafe" |
| | | |
| | | // OpenRecorder rec func open |
| | | func (h *GoFFMPEG) OpenRecorder() { |
| | | C.wrap_fn_open_rec(unsafe.Pointer(libcffmpeg), h.ffmpeg) |
| | | } |
| | | |
| | | // FireRecorder fire recorder |
| | | func (h *GoFFMPEG) FireRecorder(sid string, id int64) { |
| | | csid := C.CString(sid) |
| | |
| | | |
| | | import "unsafe" |
| | | |
| | | // CloseStream close stream |
| | | func (h *GoFFMPEG) CloseStream() { |
| | | C.wrap_fn_close_stream(unsafe.Pointer(libcffmpeg), h.ffmpeg) |
| | | } |
| | | |
| | | //GetAVPacket get AVPacket |
| | | func (h *GoFFMPEG) GetAVPacket() ([]byte, int, int) { |
| | | var key C.int |
| | | var size C.int |
| | | |
| | | p := C.wrap_fn_get_avpacket(unsafe.Pointer(libcffmpeg), h.ffmpeg, &size, &key) |
| | | defer C.free(unsafe.Pointer(p)) |
| | | if size <= 0 { |
| | | return nil, 0, -1 |
| | | } |
| | | defer C.free(unsafe.Pointer(p)) |
| | | d := C.GoBytes(p, size) |
| | | s := int(size) |
| | | k := int(key) |
| | | |
| | | return d, s, k |
| | | } |
| | | |
| | | //GetAVPacket2 get AVPacket |
| | | // Zero-copy variant of GetAVPacket: the returned []byte aliases the C |
| | | // buffer p, so the caller must keep using the slice only while p is |
| | | // alive and must release p afterwards (e.g. via ReleaseC). |
| | | // On failure (size <= 0) it returns (nil, nil, 0, -1). |
| | | // NOTE(review): when size <= 0 the C buffer is not freed here — |
| | | // presumably the wrapper returns NULL in that case; confirm in wrapper.cpp. |
| | | func (h *GoFFMPEG) GetAVPacket2() (unsafe.Pointer, []byte, int, int) { |
| | | var key C.int |
| | | var size C.int |
| | | |
| | | p := C.wrap_fn_get_avpacket(unsafe.Pointer(libcffmpeg), h.ffmpeg, &size, &key) |
| | | if size <= 0 { |
| | | return nil, nil, 0, -1 |
| | | } |
| | | |
| | | // Reinterpret the C buffer as a Go slice without copying; maxLen only |
| | | // bounds the fake array type, the slice itself is capped to length. |
| | | const maxLen = 0x7fffffff |
| | | length := int(size) |
| | | data := (*[maxLen]byte)(unsafe.Pointer(p))[:length:length] |
| | | |
| | | k := int(key) |
| | | |
| | | return p, data, length, k |
| | | } |
| | |
| | | fn_cpu(h); |
| | | } |
| | | |
| | | void wrap_fn_open_rec(void *lib, const cffmpeg h){ |
| | | /* Lazily resolve c_ffmpeg_open_recorder from the shared object, then |
| | |  * forward the call; silently a no-op if the symbol is missing. */ |
| | | if (fn_open_recorder == NULL) { |
| | | fn_open_recorder = (lib_cffmpeg_open_recorder)dlsym(lib, "c_ffmpeg_open_recorder"); |
| | | } |
| | | if (fn_open_recorder != NULL) { |
| | | fn_open_recorder(h); |
| | | } |
| | | } |
| | | |
| | | void wrap_fn_recorder(void *lib, const cffmpeg h, const char* id, const char* dir, const int64_t fid, int mind, int maxd, int audio){ |
| | | if (!fn_recorder){ |
| | | fn_recorder = (lib_cffmpeg_recorder)dlsym(lib, "c_ffmpeg_build_recorder"); |
| | |
| | | return fn_decoder_pic(h, wid, hei, format, length, id); |
| | | } |
| | | |
| | | void wrap_fn_close_stream(void *lib, const cffmpeg h){ |
| | | /* Lazily resolve c_ffmpeg_close_stream, then forward; no-op when the |
| | |  * symbol cannot be found. */ |
| | | if (fn_close_stream == NULL) { |
| | | fn_close_stream = (lib_cffmpeg_close_stream)dlsym(lib, "c_ffmpeg_close_stream"); |
| | | } |
| | | if (fn_close_stream != NULL) { |
| | | fn_close_stream(h); |
| | | } |
| | | } |
| | | |
| | | void* wrap_fn_get_avpacket(void *lib, const cffmpeg h, int* size, int* key){ |
| | | if(!fn_get_avpacket){ |
| | | fn_get_avpacket = (lib_cffmpeg_avpacket)dlsym(lib, "c_ffmpeg_get_avpacket"); |
| | |
| | | return fn_get_avpacket(h, size, key); |
| | | } |
| | | |
| | | // return val: -1 open error; -2, find stream error; -3, converter create error |
| | | void* wrap_fn_decode(void *lib, const char* file, const int gb, int* wid, int* hei){ |
| | | if (fn_decode == NULL) { |
| | | /* First call: bind the symbol; release the library on failure. */ |
| | | fn_decode = (lib_cffmpeg_decode)dlsym(lib, "c_ffmpeg_decode"); |
| | | release_if_err(fn_decode, lib); |
| | | } |
| | | return fn_decode(file, gb, wid, hei); |
| | | } |
| | | |
| | | // for encoder |
| | | // NOTE(review): the merge left both the old and the new signature (and |
| | | // both return statements) in this function; kept the newer pix_fmt |
| | | // variant, which matches c_ffmpeg_create_encoder in the current header. |
| | | cencoder wrap_fn_create_encoder(void *lib, const int w, const int h, const int fps, const int br, const int pix_fmt, const int scale_flag, const int gi){ |
| | | if (!fn_create_encoder){ |
| | | fn_create_encoder = (lib_cffmpeg_create_encoder)dlsym(lib, "c_ffmpeg_create_encoder"); |
| | | release_if_err(fn_create_encoder, lib); |
| | | } |
| | | return fn_create_encoder(w, h, fps, br, pix_fmt, scale_flag, gi); |
| | | } |
| | | |
| | | void wrap_fn_destroy_encoder(void *lib, const cencoder h){ |
| | |
| | | |
| | | uint8_t *out = NULL; |
| | | const int flag = fn_encode(hdl, (uint8_t*)in, w, h, &out, out_size, key); |
| | | if (flag > 0 && out != NULL) { |
| | | if (flag == 0 && out != NULL) { |
| | | return out; |
| | | } |
| | | *out_size = 0; |
| | | *key = 0; |
| | | return NULL; |
| | | } |
| | | |
| | | // for conv |
| | | /* Create a pixel-format converter; binds c_ffmpeg_create_conv on first |
| | |  * use and releases the library if the symbol is missing. */ |
| | | cconv wrap_fn_create_conv(void *lib, const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag){ |
| | | if (fn_create_conv == NULL) { |
| | | fn_create_conv = (lib_cffmpeg_create_conv)dlsym(lib, "c_ffmpeg_create_conv"); |
| | | release_if_err(fn_create_conv, lib); |
| | | } |
| | | return fn_create_conv(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flag); |
| | | } |
| | | |
| | | void wrap_fn_destroy_conv(void *lib, const cconv h){ |
| | | /* Destroy a converter; no-op when the destroy symbol cannot be bound. */ |
| | | if (fn_destroy_conv == NULL) { |
| | | fn_destroy_conv = (lib_cffmpeg_destroy_conv)dlsym(lib, "c_ffmpeg_destroy_conv"); |
| | | } |
| | | if (fn_destroy_conv != NULL) { |
| | | fn_destroy_conv(h); |
| | | } |
| | | } |
| | | |
| | | void* wrap_fn_conv(void *lib, const cconv h, uint8_t *in){ |
| | | /* Run one conversion; binds c_ffmpeg_conv on first use and releases |
| | |  * the library if the symbol is missing. */ |
| | | if (fn_conv == NULL) { |
| | | fn_conv = (lib_cffmpeg_conv)dlsym(lib, "c_ffmpeg_conv"); |
| | | release_if_err(fn_conv, lib); |
| | | } |
| | | return fn_conv(h, in); |
| | | } |
| | |
| | | /* Function-pointer types mirroring the c_ffmpeg_* exports of the |
| | |  * shared object; each one is bound via dlsym by its wrap_fn_* peer. */ |
| | | typedef void (*lib_cffmpeg_gb28181)(const cffmpeg); |
| | | typedef char * (*lib_cffmpeg_get_gb28181_pic)(const char *rtspUrl, int *retDataLen); |
| | | typedef void (*lib_cffmpeg_cpu)(const cffmpeg); |
| | | typedef void (*lib_cffmpeg_open_recorder)(const cffmpeg); |
| | | typedef void (*lib_cffmpeg_rec_duration)(const cffmpeg, const int, const int); |
| | | typedef void (*lib_cffmpeg_recorder)(const cffmpeg, const char*, const char*, const int64_t, int, int, int); |
| | | typedef void (*lib_cffmpeg_fire_recorder)(const cffmpeg, const char*, const int64_t); |
| | | typedef void (*lib_cffmpeg_info_recorder)(const cffmpeg, int*, char**, int*, char**, int*); |
| | | typedef void (*lib_cffmpeg_decoder)(const cffmpeg); |
| | | typedef void*(*lib_cffmpeg_pic)(const cffmpeg, int*, int*, int*, int*, int64_t*); |
| | | typedef void (*lib_cffmpeg_close_stream)(const cffmpeg); |
| | | typedef void*(*lib_cffmpeg_avpacket)(const cffmpeg, int*, int*); |
| | | typedef void*(*lib_cffmpeg_decode)(const char*, const int, int*, int*); |
| | | |
| | | static lib_cffmpeg_create fn_create = NULL; |
| | | static lib_cffmpeg_create2 fn_create2 = NULL; |
| | |
| | | /* Lazily-bound symbol cache: each pointer starts NULL and is filled by |
| | |  * the corresponding wrap_fn_* on its first call. */ |
| | | static lib_cffmpeg_gb28181 fn_gb28181 = NULL; |
| | | static lib_cffmpeg_get_gb28181_pic fn_get_gb28181_pic = NULL; |
| | | static lib_cffmpeg_cpu fn_cpu = NULL; |
| | | static lib_cffmpeg_open_recorder fn_open_recorder = NULL; |
| | | static lib_cffmpeg_rec_duration fn_rec_duration = NULL; |
| | | static lib_cffmpeg_recorder fn_recorder = NULL; |
| | | static lib_cffmpeg_fire_recorder fn_fire_recorder = NULL; |
| | | static lib_cffmpeg_info_recorder fn_info_recorder = NULL; |
| | | static lib_cffmpeg_decoder fn_decoder = NULL; |
| | | static lib_cffmpeg_pic fn_decoder_pic = NULL; |
| | | static lib_cffmpeg_close_stream fn_close_stream = NULL; |
| | | static lib_cffmpeg_avpacket fn_get_avpacket = NULL; |
| | | static lib_cffmpeg_decode fn_decode = NULL; |
| | | |
| | | // Opaque handle to the dlopen'ed libcffmpeg shared object. |
| | | typedef void* libcffmpeg; |
| | | libcffmpeg init_libcffmpeg(const char *so_file); |
| | |
| | | /* Public wrapper API: each wrap_fn_* resolves its c_ffmpeg_* symbol |
| | |  * from lib on first use and then forwards the call. */ |
| | | void wrap_fn_run_gb28181(void *lib, const cffmpeg h); |
| | | char * wrap_fn_get_gb28181_pic(void *lib, const char *rtspUrl, int *retDataLen); |
| | | void wrap_fn_use_cpu(void *lib, const cffmpeg h); |
| | | void wrap_fn_open_rec(void *lib, const cffmpeg h); |
| | | void wrap_fn_rec_duration(void *lib, const cffmpeg h, const int min, const int max); |
| | | void wrap_fn_recorder(void *lib, const cffmpeg h, const char* id, const char* dir, const int64_t fid, int mind, int maxd, int audio); |
| | | void wrap_fn_fire_recorder(void *lib, const cffmpeg h, const char *sid, const int64_t id); |
| | | void wrap_fn_info_recorder(void *lib, const cffmpeg, int* index, char** recid, int* recidLen, char** fpath, int* pathLen); |
| | | void wrap_fn_decoder(void *lib, const cffmpeg h); |
| | | void* wrap_fn_decoder_pic(void *lib, const cffmpeg h, int *wid, int *hei, int *format, int *length, int64_t *id); |
| | | void wrap_fn_close_stream(void *lib, const cffmpeg h); |
| | | void* wrap_fn_get_avpacket(void *lib, const cffmpeg h, int* size, int* key); |
| | | void* wrap_fn_decode(void *lib, const char* file, const int gb, int* wid, int* hei); |
| | | // for encoder |
| | | // cencoder is an opaque handle to a C-side video encoder. |
| | | typedef void* cencoder; |
| | | // NOTE(review): the merge left two conflicting typedefs for |
| | | // lib_cffmpeg_create_encoder (a duplicate-typedef compile error); kept |
| | | // the newer one with pix_fmt, matching c_ffmpeg_create_encoder. |
| | | typedef cencoder (*lib_cffmpeg_create_encoder)(const int w, const int h, const int fps, const int br, const int pix_fmt, const int scale_flag, const int gi); |
| | | typedef void (*lib_cffmpeg_destroy_encoder)(cencoder h); |
| | | typedef int (*lib_cffmpeg_encode)(cencoder hdl, uint8_t *in, const int w, const int h, uint8_t **out, int *size, int *key); |
| | | |
| | |
| | | static lib_cffmpeg_destroy_encoder fn_destroy_encoder = NULL; |
| | | static lib_cffmpeg_encode fn_encode = NULL; |
| | | |
| | | // NOTE(review): the merge left two conflicting prototypes for |
| | | // wrap_fn_create_encoder; kept the pix_fmt variant to match the |
| | | // definition in wrapper.c and the C export. |
| | | cencoder wrap_fn_create_encoder(void *lib, const int w, const int h, const int fps, const int br, const int pix_fmt, const int scale_flag, const int gi); |
| | | void wrap_fn_destroy_encoder(void *lib, const cencoder h); |
| | | void* wrap_fn_encode(void *lib, cencoder hdl, void *in, const int w, const int h, int *out_size, int *key); |
| | | |
| | | |
| | | // for conv |
| | | // cconv is an opaque handle to a C-side pixel-format converter. |
| | | typedef void *cconv; |
| | | typedef cconv (*lib_cffmpeg_create_conv)(const int, const int, const int, const int, const int, const int, const int); |
| | | typedef void* (*lib_cffmpeg_conv)(const cconv, uint8_t *in); |
| | | typedef void (*lib_cffmpeg_destroy_conv)(const cconv); |
| | | |
| | | // Lazily-bound converter symbols (filled by the wrap_fn_* below). |
| | | static lib_cffmpeg_create_conv fn_create_conv = NULL; |
| | | static lib_cffmpeg_destroy_conv fn_destroy_conv = NULL; |
| | | static lib_cffmpeg_conv fn_conv = NULL; |
| | | |
| | | cconv wrap_fn_create_conv(void *lib, const int srcW, const int srcH, const int srcFormat, |
| | | const int dstW, const int dstH, const int dstFormat, const int flag); |
| | | void wrap_fn_destroy_conv(void *lib, const cconv h); |
| | | void* wrap_fn_conv(void *lib, const cconv h, uint8_t *in); |
| | | |
| | | #ifdef __cplusplus |
| | | } |