| | |
| | | #include "RtspNativeCodecJNI.h" |
| | | #include <PipeLine.h> |
| | | #include <PL_RTSPClient.h> |
| | | #include <PL_AndroidMediaCodecDecoder.h> |
| | | #include <PL_AndroidSurfaceViewSink.h> |
| | | #include "CameraWrapper.h" |
| | | #include <logger.h> |
| | | #include <Logger/src/logger.hpp> |
| | | #include <logger.h> |
| | | |
| | | #include <jni.h> |
| | | #include "looper.h" |
| | | #include "media/NdkMediaCodec.h" |
| | | #include "media/NdkMediaExtractor.h" |
| | | //#include "looper.h" |
| | | #include <android/native_window_jni.h> |
| | | #include <media/NdkMediaCodec.h> |
| | | #include <media/NdkMediaExtractor.h> |
| | | |
| | | #include <pthread.h> |
| | | |
// Global in-memory log sink: all logger output accumulates in this stream.
std::stringstream logss;
// Alternative file-backed sinks kept for debugging on-device:
//std::fstream logss("/storage/sdcard/log.log", std::fstream::out);
//std::fstream logss("/storage/emulated/0/log.log", std::fstream::out);
Logger g_logger(logss);
| | | |
| | | class CameraWrapper |
| | | { |
| | | PipeLine* pipeLine; |
| | | |
| | | PL_RTSPClient_Config rtspConfig; |
| | | PL_AndroidMediaCodecDecoder_Config amcdConfig; |
| | | PL_AndroidSurfaceViewSink asvsConfig; |
| | | |
| | | jmethodID faceCallback; |
| | | |
| | | CameraWrapper() : pipeLine(nullptr), rtspConfig(), amcdConfig(), asvsConfig(), faceCallback(0) |
| | | { |
| | | } |
| | | |
| | | ~CameraWrapper() |
| | | { |
| | | delete pipeLine; |
| | | } |
| | | |
| | | bool start() |
| | | { |
| | | } |
| | | |
| | | void stop() |
| | | { |
| | | } |
| | | }; |
| | | |
// One wrapper per camera; CAMERA_COUNT presumably comes from RtspNativeCodecJNI.h — confirm.
CameraWrapper g_CameraWrappers[CAMERA_COUNT];
| | | |
| | | extern "C" |
| | | { |
| | | |
| | | void Java_RtspNativeCodec_init(JNIEnv *env, jclass clazz) |
| | | void Java_cn_com_basic_face_util_RtspFaceNative_init(JNIEnv *env, jclass clazz) |
| | | { |
| | | g_logger.set_level(VERBOSE); |
| | | |
| | | PipeLine::register_global_elem_creator("PL_RTSPClient", create_PL_RTSPClient); |
| | | PipeLine::register_global_elem_creator("PL_AndroidMediaCodecDecoder", create_PL_AndroidMediaCodecDecoder); |
| | | PipeLine::register_global_elem_creator("PL_AndroidSurfaceViewSink", create_PL_AndroidSurfaceViewSink); |
| | | |
| | | #ifdef USE_ST_SDK |
| | | PipeLine::register_global_elem_creator("PL_SensetimeFaceTrack", create_PL_SensetimeFaceTrack); |
| | | #endif |
| | | |
| | | for (size_t i = 0; i < CAMERA_COUNT; i++) |
| | | { |
| | | g_CameraWrappers[i].cameraIdx = i + 1; |
| | | |
| | | g_CameraWrappers[i].pipeLine = new PipeLine; |
| | | PipeLine& pipeLine(*(g_CameraWrappers[i].pipeLine)); |
| | | } |
| | | } |
| | | |
| | | // set the surface |
| | | void Java_RtspNativeCodec_setSurface(JNIEnv *env, jclass clazz, jint cameraIdx, jobject surface) |
| | | void Java_cn_com_basic_face_util_RtspFaceNative_setSurface(JNIEnv *env, jclass clazz, jint cameraIdx, jobject surface) |
| | | { |
| | | LOGV("@@@ Java_RtspNativeCodec_setSurface"); |
| | | assert(cameraIdx <= CAMERA_COUNT); |
| | | |
| | | // obtain a native window from a Java surface |
| | | if (asvsConfig.window) |
| | | { |
| | | ANativeWindow_release((ANativeWindow)(asvsConfig.window)); |
| | | asvsConfig.window = NULL; |
| | | } |
| | | asvsConfig.window = ANativeWindow_fromSurface(env, surface); |
| | | LOGV("@@@ setsurface %p", asvsConfig.window); |
| | | } |
| | | |
| | | jboolean Java_RtspNativeCodec_createPlayer(JNIEnv* env, jclass clazz, jobject assetMgr, jint cameraIdx, jstring uri) |
| | | { |
| | | LOGV("@@@ Java_RtspNativeCodec_createPlayer"); |
| | | assert(cameraIdx <= CAMERA_COUNT); |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_setSurface" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | // obtain a native window from a Java surface |
| | | if (cameraWrapper.window) |
| | | { |
| | | ANativeWindow_release((ANativeWindow*)(cameraWrapper.window)); |
| | | cameraWrapper.window = NULL; |
| | | } |
| | | cameraWrapper.window = ANativeWindow_fromSurface(env, surface); |
| | | LOGP(DEBUG, "@@@ setsurface %p", cameraWrapper.window); |
| | | } |
| | | |
| | | jboolean Java_cn_com_basic_face_util_RtspFaceNative_createPlayer(JNIEnv* env, jclass clazz, jint cameraIdx, jstring uri) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_createPlayer" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | { |
| | | const char *utf8Uri = env->GetStringUTFChars(uri, NULL); |
| | | cameraWrapper.rtspConfig = utf8Uri; |
| | | cameraWrapper.rtspConfig.rtspURL = utf8Uri; |
| | | env->ReleaseStringUTFChars(uri, utf8Uri); |
| | | |
| | | cameraWrapper.rtspConfig.progName = "RtspNativeCodec"; |
| | | cameraWrapper.rtspConfig.rtspURL = utf8; |
| | | cameraWrapper.rtspConfig.aux = true; // ffmpeg need aux, but live555 not |
| | | cameraWrapper.rtspConfig.verbosityLevel = 1; |
| | | cameraWrapper.rtspConfig.tunnelOverHTTPPortNum = 0; |
| | | cameraWrapper.rtspConfig.args = nullptr; |
| | | |
| | | PL_RTSPClient* rtspClient = (PL_RTSPClient*)cameraWrapper.pipeLine.push_elem("PL_RTSPClient"); |
| | | bool ret = rtspClient->init(&(cameraWrapper.rtspConfig)); |
| | | if (!ret) |
| | | { |
| | | LOG_ERROR << "rtspClient.init error" << std::endl; |
| | | return JNI_FALSE; |
| | | } |
| | | } |
| | | |
| | | { |
| | | PL_AndroidMediaCodecDecoder* amcDecoder = (PL_AndroidMediaCodecDecoder*)cameraWrapper.pipeLine.push_elem("PL_AndroidMediaCodecDecoder"); |
| | | cameraWrapper.amcdConfig.ak_mime = ""; |
| | | cameraWrapper.amcdConfig.ak_width = 1920; |
| | | cameraWrapper.amcdConfig.ak_height = 1080; |
| | | bool ret = amcDecoder->init(&(cameraWrapper.amcdConfig)); |
| | | if (!ret) |
| | | { |
| | | LOG_ERROR << "amcDecoder.init error" << std::endl; |
| | | return JNI_FALSE; |
| | | } |
| | | } |
| | | |
| | | { |
| | | PL_AndroidSurfaceViewSink* asvSink = (PL_AndroidSurfaceViewSink*)cameraWrapper.pipeLine.push_elem("PL_AndroidSurfaceViewSink"); |
| | | cameraWrapper.asvsConfig. |
| | | bool ret = asvSink->init(&(cameraWrapper.asvsConfig)); |
| | | if (!ret) |
| | | { |
| | | LOG_ERROR << "asvSink.init error" << std::endl; |
| | | return JNI_FALSE; |
| | | } |
| | | } |
| | | |
| | | return (cameraWrapper.start() ? JNI_TRUE : JNI_FALSE); |
| | | { |
| | | cameraWrapper.amcdConfig.ak_mime = "video/avc"; |
| | | cameraWrapper.amcdConfig.ak_width = 1920; |
| | | cameraWrapper.amcdConfig.ak_height = 1080; |
| | | cameraWrapper.amcdConfig.windowSurface = cameraWrapper.window = nullptr;//#todo |
| | | cameraWrapper.amcdConfig.releaseOutputBuffIdx = true; |
| | | cameraWrapper.amcdConfig.generateDecodedDataPerFrame = 1; |
| | | #ifndef USE_ST_SDK |
| | | cameraWrapper.amcdConfig.releaseOutputBuffIdxInPay = true; |
| | | #endif |
| | | } |
| | | |
| | | { |
| | | cameraWrapper.sftConfig.point_size = 21; |
| | | cameraWrapper.sftConfig.detect_face_cnt_limit = MAX_FACE; |
| | | cameraWrapper.sftConfig.draw_face_rect = false; |
| | | cameraWrapper.sftConfig.draw_face_feature_point = false; |
| | | cameraWrapper.sftConfig.generate_face_feature = true; |
| | | } |
| | | |
| | | bool ret = cameraWrapper.initPl(); |
| | | if (ret) |
| | | return (cameraWrapper.start() ? JNI_TRUE : JNI_FALSE); |
| | | else |
| | | return JNI_FALSE; |
| | | } |
| | | |
| | | // shut down the native media system |
| | | void Java_RtspNativeCodec_shutdown(JNIEnv* env, jclass clazz, jint cameraIdx) |
| | | void Java_cn_com_basic_face_util_RtspFaceNative_shutdown(JNIEnv* env, jclass clazz, jint cameraIdx) |
| | | { |
| | | LOGV("@@@ Java_RtspNativeCodec_shutdown"); |
| | | assert(cameraIdx <= CAMERA_COUNT); |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_shutdown" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | cameraWrapper.stop(); |
| | | } |
| | | |
| | | jboolean Java_RtspNativeCodec_setFaceCallback(JNIEnv* env, jclass clazz, jobject assetMgr, jint cameraIdx, jstring func) |
| | | { |
| | | LOGV("@@@ Java_RtspNativeCodec_setFaceCallback"); |
| | | assert(cameraIdx <= CAMERA_COUNT); |
| | | |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | LOGE("jni callback (0)"); |
| | | jclass cls = env->GetObjectClass(clazz); |
| | | cameraWrapper.faceCallback = env->GetMethodID(cls, func, "(II)V"); // Java_FaceCallback_func |
| | | |
| | | // call: |
| | | //env->CallVoidMethod(obj, callback, 5 , 10); |
| | | //see: https://github.com/BelledonneCommunications/mediastreamer2/blob/master/src/android/android_mediacodec.cpp |
| | | static int handle_java_exception(JNIEnv *env) { |
| | | if (env->ExceptionCheck()) { |
| | | env->ExceptionDescribe(); |
| | | env->ExceptionClear(); |
| | | return -1; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | jboolean Java_RtspNativeCodec_getFaceList(JNIEnv* env, jclass clazz, jobject assetMgr, jint cameraIdx, jbyteArray faceListPb) |
| | | { |
| | | assert(cameraIdx <= CAMERA_COUNT); |
| | | static bool _loadClass(JNIEnv *env, const char *className, jclass *_class) { |
| | | *_class = env->FindClass(className); |
| | | if(handle_java_exception(env) == -1 || *_class == NULL) { |
| | | LOGP(ERROR, "Could not load Java class [%s]", className); |
| | | return false; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | | static bool _getStaticMethodID(JNIEnv *env, jclass _class, const char *name, const char *sig, jmethodID *method) { |
| | | *method = env->GetStaticMethodID(_class, name, sig); |
| | | if(handle_java_exception(env) == -1 || *method == NULL) { |
| | | LOGP(ERROR, "Could not get static method %s[%s]", name, sig); |
| | | return false; |
| | | } |
| | | return true; |
| | | } |
| | | |
| | | jboolean Java_cn_com_basic_face_util_RtspFaceNative_setFaceCallback(JNIEnv* env, jclass clazz, jint cameraIdx, jstring className, jstring funcName) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_setFaceCallback" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | //jclass cls = env->GetObjectClass(clazz); |
| | | |
| | | // set for multithread callback |
| | | env->GetJavaVM(&(cameraWrapper.javaVM)); |
| | | cameraWrapper.javaEnv = env; |
| | | |
| | | std::string _className; |
| | | { |
| | | const char *utfFunc = env->GetStringUTFChars(className, NULL); |
| | | _className = utfFunc; |
| | | env->ReleaseStringUTFChars(className, utfFunc); |
| | | } |
| | | std::string _funcName; |
| | | { |
| | | const char *utfFunc = env->GetStringUTFChars(funcName, NULL); |
| | | _funcName = utfFunc; |
| | | env->ReleaseStringUTFChars(funcName, utfFunc); |
| | | } |
| | | |
| | | _loadClass(env, _className.c_str(), &(cameraWrapper.faceCallbackClazz)); |
| | | |
| | | cameraWrapper.faceCallbackClazz = static_cast<jclass>( env->NewGlobalRef( cameraWrapper.faceCallbackClazz )); //#todo need release? |
| | | |
| | | _getStaticMethodID(env, cameraWrapper.faceCallbackClazz, _funcName.c_str(), "(II)V", &(cameraWrapper.faceCallbackFunc)); |
| | | |
| | | // call test (sync) |
| | | cameraWrapper.javaEnv->CallStaticVoidMethod(cameraWrapper.faceCallbackClazz, cameraWrapper.faceCallbackFunc, cameraWrapper.cameraIdx, 0); |
| | | } |
| | | |
| | | void Java_cn_com_basic_face_util_RtspFaceNative_lockFace(JNIEnv* env, jclass clazz, jint cameraIdx) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_lockFace" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | cameraWrapper.lockFace(); |
| | | } |
| | | |
| | | void Java_cn_com_basic_face_util_RtspFaceNative_releaseFace(JNIEnv* env, jclass clazz, jint cameraIdx) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_releaseFace" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | cameraWrapper.releaseFace(); |
| | | } |
| | | |
| | | jint Java_cn_com_basic_face_util_RtspFaceNative_getFaceList(JNIEnv* env, jclass clazz, jint cameraIdx, jbyteArray faceListPb) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_getFaceList" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | // Y channel of YUV420p, packed in protobuf |
| | | uint8_t buffer[MAX_FACE * MAX_FACE_WIDTH * MAX_FACE_HEIGHT]; // #todo optimize not copy data here, set data to jbyteArray directly |
| | | size_t buffSize = sizeof(buffer); |
| | | const size_t arrSize = env->GetArrayLength(faceListPb); |
| | | bool ret = false; |
| | | |
| | | #ifdef USE_ST_SDK |
| | | ret = cameraWrapper.faceCache.getFaceListPb(buffer, buffSize); |
| | | #endif |
| | | |
| | | if (!ret) |
| | | { |
| | | LOG_INFO << "No face captured" << LOG_ENDL; |
| | | return 0; |
| | | } |
| | | else |
| | | { |
| | | LOG_INFO << "Face captured " << LOG_ENDL; |
| | | |
| | | buffSize = std::min(buffSize, arrSize); |
| | | env->SetByteArrayRegion(faceListPb, 0, buffSize, (const jbyte*)buffer); |
| | | return buffSize; |
| | | } |
| | | } |
| | | |
| | | jint Java_cn_com_basic_face_util_RtspFaceNative_getFaceImages(JNIEnv* env, jclass clazz, jint cameraIdx, jintArray faceImagesIdx, jbyteArray faceImages) |
| | | { |
| | | LOG_DEBUG << "@@@ Java_cn_com_basic_face_util_RtspFaceNative_getFaceImages" << LOG_ENDL; |
| | | assert(cameraIdx > 0 && cameraIdx <= CAMERA_COUNT); |
| | | cameraIdx -= 1; |
| | | CameraWrapper& cameraWrapper(g_CameraWrappers[cameraIdx]); |
| | | |
| | | size_t count = 0; |
| | | int _faceImagesIdx[MAX_FACE] = {-1}; |
| | | uint8_t _faceImages[MAX_FACE * MAX_FACE_WIDTH * MAX_FACE_HEIGHT]; |
| | | size_t _faceImagesSize = sizeof(_faceImages); |
| | | bool ret = false; |
| | | |
| | | const size_t faceImagesIdxArrSize = env->GetArrayLength(faceImagesIdx); // count of int |
| | | const size_t faceImagesArrSize = env->GetArrayLength(faceImages); |
| | | |
| | | #ifdef USE_ST_SDK |
| | | ret = cameraWrapper.faceCache.getFaceListImage(_faceImagesIdx, count, _faceImages, _faceImagesSize); |
| | | #endif |
| | | |
| | | if (!ret && count > 0) |
| | | { |
| | | LOG_INFO << "No face image captured" << LOG_ENDL; |
| | | return 0; |
| | | } |
| | | |
| | | int _faceImagesIdxCount = std::min(count, faceImagesIdxArrSize); |
| | | env->SetIntArrayRegion(faceImagesIdx, 0, _faceImagesIdxCount, (const jint*)_faceImagesIdx); |
| | | |
| | | _faceImagesSize = std::min(_faceImagesSize, faceImagesArrSize); |
| | | env->SetByteArrayRegion(faceImages, 0, _faceImagesSize, (const jbyte*)_faceImages); |
| | | |
| | | return count; |
| | | } |
| | | |
| | | |
| | | } |