/**
 * 叶海辉
 * QQ group: 121376426
 * http://blog.yundiantech.com/
 */

#include "VideoPlayerHK.h"
#include "Audio/PcmVolumeControl.h"
#include "parser/parser_callback.h"

#include <thread>
#include <stdio.h>

VideoPlayerHK::VideoPlayerHK()
{
    mVideoType = NORMALVIDEO;
    parseCB = nullptr;

    initPlayer();
}

VideoPlayerHK::~VideoPlayerHK()
{
}

bool VideoPlayerHK::startPlay(const std::string &filePath, int videoType)
{
    if (mPlayerState != VideoPlayer_Stop)
    {
        return false;
    }

    mIsQuit = false;
    mIsPause = false;

    if (!filePath.empty())
        mFilePath = filePath;

    mVideoType = videoType;

    // Start a new thread that reads the video file
    std::thread([&](VideoPlayerHK *pointer)
    {
        pointer->readVideoFile();
    }, this).detach();

//    readVideoFile();

    parseCB = new parser_callback(&mVideoPacktList, &mAudioPacktList, mConditon_Video, mConditon_Audio);
    run_player(mFilePath.c_str(), mVideoType, parseCB);

    return true;
}

int64_t VideoPlayerHK::getTotalTime()
{
    return 1;
}

void VideoPlayerHK::readVideoFile()
{
    /// SDL must be initialized in the worker thread; on some machines it fails otherwise.
    if (SDL_Init(SDL_INIT_AUDIO))
    {
        doOpenSdlFailed(-100);
        fprintf(stderr, "Could not initialize SDL - %s. \n", SDL_GetError());
        return;
    }

    mIsReadThreadFinished = false;
    mIsReadFinished = false;

    pFormatCtx = nullptr;
    pCodecCtx = nullptr;
    pCodec = nullptr;
    aCodecCtx = nullptr;
    aCodec = nullptr;
    aFrame = nullptr;

    mAudioStream = new AVStream;
    mVideoStream = new AVStream;

    audio_clock = 0;
    video_clock = 0;

    doTotalTimeChanged(getTotalTime());

    /// Create a dedicated thread for video decoding
    std::thread([&](VideoPlayerHK *pointer)
    {
        pointer->decodeVideoThread();
    }, this).detach();

    // find the audio decoder
    int codecId = AV_CODEC_ID_AMR_NB;
    aCodec = avcodec_find_decoder((AVCodecID)codecId);
    if (!aCodec)
    {
        fprintf(stderr, "Codec not found\n");
        return;
    }

    aCodecCtx = avcodec_alloc_context3(aCodec);
    if (!aCodecCtx)
    {
        fprintf(stderr, "Could not allocate audio codec context\n");
        return;
    }

    aCodecCtx->codec = aCodec;
    aCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLT;
    aCodecCtx->bit_rate = 6000;
    aCodecCtx->sample_rate = 8000;
    aCodecCtx->channels = 1;
    aCodecCtx->channel_layout = AV_CH_LAYOUT_MONO;
    aCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;

    if (aCodec == NULL)
    {
        fprintf(stderr, "ACodec not found.\n");
    }
    else
    {
        /// Open the audio decoder
        if (avcodec_open2(aCodecCtx, aCodec, nullptr) < 0)
        {
            fprintf(stderr, "Could not open audio codec.\n");
            doOpenVideoFileFailed();
            goto end;
        }

        /// Audio decoding
        aFrame = av_frame_alloc();

        // Resampling options ----------------------------------------------------------- start
        aFrame_ReSample = nullptr;

        // frame -> 16-bit 44100 PCM: unify the audio sample format and sample rate
        swrCtx = nullptr;

        // input channel layout
        int in_ch_layout;
        // output channel layout
        int out_ch_layout;

        /// Audio playback uses fixed parameters here:
        /// the audio is forcibly resampled to 44100 Hz, stereo, AV_SAMPLE_FMT_S16.
        /// SDL playback uses the same parameters.

        // Resampling options ----------------
        // input sample format
        in_sample_fmt = aCodecCtx->sample_fmt;
        // output sample format: 16-bit PCM
        out_sample_fmt = AV_SAMPLE_FMT_S16;
        // input sample rate
        in_sample_rate = aCodecCtx->sample_rate;
        // input channel layout
        in_ch_layout = aCodecCtx->channel_layout;

        // output sample rate
        out_sample_rate = 44100;
        // output channel layout
        audio_tgt_channels = 2; ///av_get_channel_layout_nb_channels(out_ch_layout);
        out_ch_layout = av_get_default_channel_layout(audio_tgt_channels); ///AV_CH_LAYOUT_STEREO
        out_ch_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;

        /// Added 2019-5-13:
        /// For wav/wmv files aCodecCtx->channel_layout can be 0, which breaks the initialization below,
        /// so fall back to the default layout for the channel count.
        if (in_ch_layout <= 0)
        {
            in_ch_layout = av_get_default_channel_layout(aCodecCtx->channels);
        }

        swrCtx = swr_alloc_set_opts(nullptr,
                                    out_ch_layout, out_sample_fmt, out_sample_rate,
                                    in_ch_layout, in_sample_fmt, in_sample_rate,
                                    0, nullptr);
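        // At this point swrCtx describes a conversion from the decoder's native format
        // (8 kHz mono float for AMR-NB here) to the fixed SDL playback format
        // (44100 Hz stereo, signed 16-bit).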

        /** Open the resampler with the specified parameters. */
        int ret = swr_init(swrCtx);
        if (ret < 0)
        {
            char buff[128] = {0};
            av_strerror(ret, buff, 128);
            fprintf(stderr, "Could not open resample context %s\n", buff);
            swr_free(&swrCtx);
            swrCtx = nullptr;
            doOpenVideoFileFailed();
            goto end;
        }

        // buffer size for the PCM data
        int out_linesize = out_sample_rate * audio_tgt_channels;
//        out_linesize = av_samples_get_buffer_size(NULL, audio_tgt_channels, av_get_bytes_per_sample(out_sample_fmt), out_sample_fmt, 1);
        out_linesize = AVCODEC_MAX_AUDIO_FRAME_SIZE;

//        mAudioStream = pFormatCtx->streams[audioStream];

        /// Open SDL for audio playback
        int code = openSDL();
        if (code == 0)
        {
            SDL_LockAudioDevice(mAudioID);
            SDL_PauseAudioDevice(mAudioID, 0);
            SDL_UnlockAudioDevice(mAudioID);

            mIsAudioThreadFinished = false;
        }
        else
        {
            doOpenSdlFailed(code);
        }
    }

    mPlayerState = VideoPlayer_Playing;
    doPlayerStateChanged(VideoPlayer_Playing, mVideoStream != nullptr, mAudioStream != nullptr);

    mVideoStartTime = av_gettime();
    fprintf(stderr, "%s mIsQuit=%d mIsPause=%d \n", __FUNCTION__, mIsQuit, mIsPause);

    /// The file has been handed to the parser;
    /// wait here until playback finishes.
    while (!mIsQuit)
    {
        mSleep(100);
    }

end:
    parseCB->quit();

    clearAudioQuene();
    clearVideoQuene();

    if (mPlayerState != VideoPlayer_Stop) // not stopped from outside, playback ended normally
    {
        stop();
    }

    while ((mVideoStream != nullptr && !mIsVideoThreadFinished)
           || (mAudioStream != nullptr && !mIsAudioThreadFinished))
    {
        mSleep(10);
    }
    // make sure the video and audio threads have finished before tearing everything down

    closeSDL();

    if (swrCtx != nullptr)
    {
        swr_free(&swrCtx);
        swrCtx = nullptr;
    }

    if (aFrame != nullptr)
    {
        av_frame_free(&aFrame);
        aFrame = nullptr;
    }

    if (aFrame_ReSample != nullptr)
    {
        av_frame_free(&aFrame_ReSample);
        aFrame_ReSample = nullptr;
    }

    if (aCodecCtx != nullptr)
    {
        avcodec_close(aCodecCtx);
        aCodecCtx = nullptr;
    }

    if (pCodecCtx != nullptr)
    {
        avcodec_close(pCodecCtx);
        pCodecCtx = nullptr;
    }

//    avformat_close_input(&pFormatCtx);
//    avformat_free_context(pFormatCtx);

    SDL_Quit();

    doPlayerStateChanged(VideoPlayer_Stop, mVideoStream != nullptr, mAudioStream != nullptr);

    mIsReadThreadFinished = true;

    fprintf(stderr, "%s finished \n", __FUNCTION__);
}

void VideoPlayerHK::decodeVideoThread()
{
    fprintf(stderr, "%s start \n", __FUNCTION__);

    mIsVideoThreadFinished = false;

    int videoWidth = 0;
    int videoHeight = 0;

    double video_pts = 0; // pts of the current video frame
    double audio_pts = 0; // audio pts

    /// Video decoding
    AVFrame *pFrame = nullptr;
    AVFrame *pFrameYUV = nullptr;
    uint8_t *yuv420pBuffer = nullptr;           // decoded YUV data
    struct SwsContext *imgConvertCtx = nullptr; // converts the decoded frames to YUV420P

//    AVCodecContext *pCodecCtx = mVideoStream->codec; // video decoder

    // find the H.264 video decoder
    pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!pCodec)
    {
        fprintf(stderr, "Codec not found\n");
        return;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx)
    {
        fprintf(stderr, "Could not allocate video codec context\n");
        return;
    }

    auto parser = av_parser_init(pCodec->id);
    if (!parser)
    {
        fprintf(stderr, "parser not found\n");
        return;
    }

    /// Open the video decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        fprintf(stderr, "Could not open video codec.\n");
        doOpenVideoFileFailed();
        return;
    }

//    FILE *fp_v = fopen("./pkts.h264", "wb");

    pFrame = av_frame_alloc();
    auto pkt = av_packet_alloc();

    while (1)
    {
        if (mIsQuit)
        {
            clearVideoQuene(); // clear the queue
            break;
        }

        if (mIsPause == true) // check for pause
        {
            mSleep(10);
            continue;
        }

        mConditon_Video->Lock();

        static int emptyTimes = 0;
        if (mVideoPacktList.size() <= 0)
        {
            mConditon_Video->Unlock();

            if (mIsReadFinished)
            {
                // the queue is empty and reading has finished
                break;
            }
            else
            {
                emptyTimes++;
                if (emptyTimes >= 50)
                {
                    // the queue has stayed empty for too long, give up
                    break;
                }
                mSleep(1); // the queue is only temporarily empty
                continue;
            }
        }
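        // A packet is available: pop it from the queue while still holding the lock.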
        emptyTimes = 0;

        AVPacket pkt1 = mVideoPacktList.front();
        mVideoPacktList.pop_front();

        mConditon_Video->Unlock();

//        fwrite(pkt1.data, 1, pkt1.size, fp_v);

        AVPacket *packet = &pkt1;

        // Receiving this packet means a seek just happened; flush the decoder's buffers
        if (strcmp((char*)packet->data, FLUSH_DATA) == 0)
        {
            fprintf(stderr, "strcmp((char*)packet->data, FLUSH_DATA) == 0\n");
            avcodec_flush_buffers(pCodecCtx);
            av_packet_unref(packet);
            continue;
        }

        while (packet->size > 0)
        {
            int ret = av_parser_parse2(parser, pCodecCtx, &pkt->data, &pkt->size,
                                       packet->data, packet->size,
                                       AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0)
            {
                fprintf(stderr, "Error while parsing\n");
                break;
            }

            packet->data += ret;
            packet->size -= ret;

            if (pkt->size)
            {
                auto ret = avcodec_send_packet(pCodecCtx, pkt);
                if (ret != 0)
                {
                    qDebug("input AVPacket to decoder failed!, error:%d\n", ret);
                    av_packet_unref(pkt);
                    continue;
                }

                while (0 == avcodec_receive_frame(pCodecCtx, pFrame))
                {
                    if (pkt->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
                    {
                        video_pts = *(uint64_t*)pFrame->opaque;
                    }
                    else if (pkt->dts != AV_NOPTS_VALUE)
                    {
                        video_pts = pkt->dts;
                    }
                    else
                    {
                        video_pts = 0;
                    }

//                    printf("timebase:%d, %d\n", pCodecCtx->time_base.num, pCodecCtx->time_base.den);
                    AVRational timebase = {1, 1200000};
                    video_pts *= av_q2d(timebase); //mVideoStream->time_base);
                    video_clock = video_pts;

                    //OUTPUT("%s %f \n", __FUNCTION__, video_pts);

                    if (seek_flag_video)
                    {
                        // A seek happened: skip the frames between the keyframe and the target time
                        if (video_pts < seek_time)
                        {
                            av_packet_unref(pkt);
                            continue;
                        }
                        else
                        {
                            seek_flag_video = 0;
                        }
                    }

                    /// A/V sync: check whether it is time to display this frame;
                    /// if not, sleep a few milliseconds and check again.
                    while (1)
                    {
                        if (mIsQuit)
                        {
                            break;
                        }

                        if (mAudioStream != NULL && !mIsAudioThreadFinished)
                        {
                            if (mIsReadFinished && mAudioPacktList.size() <= 0)
                            {
                                // Reading has finished and the audio has been played out;
                                // only video is left, so display it without syncing.
                                break;
                            }

                            /// With audio present, sync the video to the audio:
                            /// compare against the audio pts and delay if the video is ahead.
                            audio_pts = audio_clock;
                        }
                        else
                        {
                            /// Without audio, sync directly to the external clock.
                            audio_pts = (av_gettime() - mVideoStartTime) / 1000000.0;
                            audio_clock = audio_pts;
                        }

                        //OUTPUT("%s %f %f \n", __FUNCTION__, video_pts, audio_pts);

                        // When seeking, video_clock is reset to 0, so refresh video_pts here;
                        // otherwise a backwards seek would get stuck in this loop.
                        video_pts = video_clock;

                        if (video_pts <= audio_pts)
                        {
                            break;
                        }

                        int delayTime = (video_pts - audio_pts) * 1000;

                        delayTime = delayTime > 5 ? 5 : delayTime;
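                        // Cap each wait at 5 ms so the loop keeps re-checking quit/pause
                        // and the audio clock instead of sleeping through the whole gap.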
                        if (!mIsNeedPause)
                        {
                            mSleep(delayTime);
                        }
                    }

                    if (pCodecCtx->width != videoWidth || pCodecCtx->height != videoHeight)
                    {
                        videoWidth = pFrame->width;
                        videoHeight = pFrame->height;

                        if (pFrameYUV != nullptr)
                        {
                            av_free(pFrameYUV);
                        }

                        if (yuv420pBuffer != nullptr)
                        {
                            av_free(yuv420pBuffer);
                        }

                        if (imgConvertCtx != nullptr)
                        {
                            sws_freeContext(imgConvertCtx);
                        }

                        pFrameYUV = av_frame_alloc();

                        int yuvSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1); // 1-byte alignment gives the size closest to the actual size
//                        int yuvSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 0); // 0-byte alignment gives a size of 0
//                        int yuvSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 4); // 4-byte alignment gives a slightly larger size
                        unsigned int numBytes = static_cast<unsigned int>(yuvSize);
                        yuv420pBuffer = static_cast<uint8_t *>(av_malloc(numBytes * sizeof(uint8_t)));
                        av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, yuv420pBuffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

                        /// The decoded frames are not necessarily YUV420P, so convert them all to YUV420P.
                        imgConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                       pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                                       SWS_BICUBIC, NULL, NULL, NULL);
                    }

                    sws_scale(imgConvertCtx,
                              (uint8_t const * const *)pFrame->data, pFrame->linesize,
                              0, pCodecCtx->height,
                              pFrameYUV->data, pFrameYUV->linesize);

                    doDisplayVideo(yuv420pBuffer, pCodecCtx->width, pCodecCtx->height);

                    if (mIsNeedPause)
                    {
                        mIsPause = true;
                        mIsNeedPause = false;
                    }
                }
            }
        }

        av_packet_unref(&pkt1);

        mSleep(15);
    }

//    if (fp_v) {
//        fclose(fp_v);
//        fp_v = NULL;
//    }

    parseCB->quit();

    av_free(pFrame);

    if (pFrameYUV != nullptr)
    {
        av_free(pFrameYUV);
    }

    if (yuv420pBuffer != nullptr)
    {
        av_free(yuv420pBuffer);
    }

    if (imgConvertCtx != nullptr)
    {
        sws_freeContext(imgConvertCtx);
    }

    if (!mIsQuit)
    {
        mIsQuit = true;
    }

    mIsVideoThreadFinished = true;

    fprintf(stderr, "%s finished \n", __FUNCTION__);

    return;
}

int VideoPlayerHK::decodeAudioFrame(bool isBlock)
{
    int audioBufferSize = 0;

    while (1)
    {
        if (mIsQuit)
        {
            mIsAudioThreadFinished = true;
            clearAudioQuene(); // clear the queue
            break;
        }

        if (mIsPause == true) // check for pause
        {
            break;
        }

        mConditon_Audio->Lock();

        if (mAudioPacktList.size() <= 0)
        {
            if (isBlock)
            {
                mConditon_Audio->Wait();
            }
            else
            {
                mConditon_Audio->Unlock();
                break;
            }
        }

        AVPacket packet = mAudioPacktList.front();
        mAudioPacktList.pop_front();
        //qDebug() << __FUNCTION__;
        mConditon_Audio->Unlock();

        AVPacket *pkt = &packet;

        /* if update, update the audio clock w/pts */
        if (pkt->pts != AV_NOPTS_VALUE)
        {
            AVRational timebase = {1, 8000};
            audio_clock = av_q2d(timebase) * pkt->pts;
        }

        // Receiving this packet means a seek just happened; flush the decoder's buffers
        if (strcmp((char*)pkt->data, FLUSH_DATA) == 0)
        {
            avcodec_flush_buffers(aCodecCtx /*mAudioStream->codec*/);
            av_packet_unref(pkt);
            continue;
        }

        if (seek_flag_audio)
        {
            // A seek happened: skip the frames between the keyframe and the target time
            if (audio_clock < seek_time)
            {
                continue;
            }
            else
            {
                seek_flag_audio = 0;
            }
        }

        // decode AVPacket -> AVFrame
        int got_frame = 0;
        int size = avcodec_decode_audio4(aCodecCtx, aFrame, &got_frame, &packet);

        // To save one channel of data before resampling:
        //size_t unpadded_linesize = aFrame->nb_samples * av_get_bytes_per_sample((AVSampleFormat) aFrame->format);
        //static FILE *fp = fopen("out.pcm", "wb");
        //fwrite(aFrame->extended_data[0], 1, unpadded_linesize, fp);

        av_packet_unref(&packet);

        if (got_frame)
        {
            /// The audio decoded by FFmpeg is not in the format SDL expects,
            /// so resample it to 44100 Hz, stereo, AV_SAMPLE_FMT_S16.
            if (aFrame_ReSample == NULL)
            {
                aFrame_ReSample = av_frame_alloc();
            }

            if (aFrame_ReSample->nb_samples != aFrame->nb_samples)
            {
                aFrame_ReSample->nb_samples = av_rescale_rnd(swr_get_delay(swrCtx, out_sample_rate) + aFrame->nb_samples,
                                                             out_sample_rate, in_sample_rate, AV_ROUND_UP);
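                // Point aFrame_ReSample->data at audio_buf so swr_convert writes the
                // resampled PCM straight into the playback buffer.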
                av_samples_fill_arrays(aFrame_ReSample->data, aFrame_ReSample->linesize, audio_buf,
                                       audio_tgt_channels, aFrame_ReSample->nb_samples, out_sample_fmt, 0);
            }

            int len2 = swr_convert(swrCtx,
                                   aFrame_ReSample->data, aFrame_ReSample->nb_samples,
                                   (const uint8_t**)aFrame->data, aFrame->nb_samples);

            int resampled_data_size = len2 * audio_tgt_channels * av_get_bytes_per_sample(out_sample_fmt);

            audioBufferSize = resampled_data_size;
            break;
        }
    }

    return audioBufferSize;
}