From af1e5a6e8264d5d95dd7b769e7c401b81afc550d Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Mon, 29 Apr 2019 14:21:31 +0800
Subject: [PATCH] gosdk lib init

---
 gosdk/csrc/buz/face/detector.h            |   18 
 gosdk/sdk/face/include/THFaceProperty_i.h |  153 ++
 gosdk/csrc/buz/base.hpp                   |    9 
 gosdk/wsdk.go                             |   41 
 gosdk/csrc/buz/face/extractor.h           |   14 
 gosdk/csrc/buz/yolo/detector.h            |   21 
 gosdk/csrc/all.hpp                        |   11 
 gosdk/sdk/face/include/THFaceLive_i.h     |   62 +
 gosdk/csdk_struct.h                       |   72 +
 gosdk/csdk.cpp                            |   98 +
 gosdk/bridge.go                           |   13 
 gosdk/sdk/darknet/include/yolo.hpp        |  641 +++++++++++
 gosdk/csrc/buz/yolo/detector.cpp          |  139 ++
 gosdk/csrc/buz/face/extractor.cpp         |   54 
 gosdk/csrc/buz/face/detector.cpp          |   60 +
 gosdk/csrc/buz/face/property.cpp          |   34 
 gosdk/gosdk.go                            |  238 ++++
 gosdk/csrc/buz/face/tracker.h             |   19 
 gosdk/sdk/face/readme.txt                 |    1 
 gosdk/c2go.go                             |   64 +
 gosdk/sdk/face/include/FiStdDefEx.h       |   33 
 gosdk/go2c.go                             |   61 +
 gosdk/cgodefs.go                          |   19 
 gosdk/csrc/buz/face/tracker.cpp           |   71 +
 gosdk/sdk/face/include/THFeature_i.h      |  183 +++
 gosdk/csrc/buz/face/property.h            |   14 
 gosdk/sdk/darknet/include/darknet.h       |  812 ++++++++++++++
 gosdk/csdk.h                              |   49 
 gosdk/sdk/face/include/THFaceImage_i.h    |  174 +++
 gosdk/sdk/face/include/THFaceTracking_i.h |  180 +++
 30 files changed, 3358 insertions(+), 0 deletions(-)
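
A minimal usage sketch of the new package (cover-letter illustration only; the
import path, model files, and image size below are placeholders, not part of
this patch):

    package main

    import (
        "fmt"
        "io/ioutil"

        "gosdk" // hypothetical import path for the package added here
    )

    func main() {
        // Initialize the YOLO detector; cfg/weights/names paths are placeholders.
        h := gosdk.InitYolo("yolov3.cfg", "yolov3.weights", "coco.names", 0)
        defer gosdk.Free()

        // Packed 3-channel pixel buffer decoded elsewhere.
        raw, _ := ioutil.ReadFile("frame.raw")
        img := gosdk.SDKImage{Data: raw, Width: 1920, Height: 1080}

        for _, obj := range gosdk.YoloDetect(h, img, 0.3, 0) {
            fmt.Println(gosdk.YoloObjName(int(obj.Typ)), obj.Prob, gosdk.Rect(obj.RcObj))
        }
    }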

diff --git a/gosdk/bridge.go b/gosdk/bridge.go
new file mode 100644
index 0000000..0e757d1
--- /dev/null
+++ b/gosdk/bridge.go
@@ -0,0 +1,13 @@
+package gosdk
+
+import "image"
+
+// Rect converts a CRECT to an image.Rectangle.
+func Rect(rect CRECT) image.Rectangle {
+	return image.Rect(int(rect.Left), int(rect.Top), int(rect.Right), int(rect.Bottom))
+}
+
+// Pt converts a CPOINT to an image.Point.
+func Pt(pt CPOINT) image.Point {
+	return image.Pt(int(pt.X), int(pt.Y))
+}
diff --git a/gosdk/c2go.go b/gosdk/c2go.go
new file mode 100644
index 0000000..d843381
--- /dev/null
+++ b/gosdk/c2go.go
@@ -0,0 +1,64 @@
+package gosdk
+
+/*
+#include <string.h>
+#include "csdk_struct.h"
+
+int get_facepos_size(){
+	return sizeof(cFacePos);
+}
+*/
+import "C"
+import (
+	"unsafe"
+)
+
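+// The helpers below walk a C-allocated array with pointer arithmetic and copy
+// each element into a Go slice, so the caller may free the C buffer as soon as
+// the conversion returns.
+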
+// CFacePosArrayToGoArray converts a C array of cFacePos into a Go slice.
+func CFacePosArrayToGoArray(cArray unsafe.Pointer, count int) (goArray []CFacePos) {
+	p := uintptr(cArray)
+
+	for i := 0; i < count; i++ {
+		j := *(*CFacePos)(unsafe.Pointer(p))
+
+		goArray = append(goArray, j)
+
+		p += unsafe.Sizeof(j)
+	}
+	return
+}
+
+// CYoloObjInfoArrayToGoArray converts a C array of cObjInfo into a Go slice.
+func CYoloObjInfoArrayToGoArray(cArray unsafe.Pointer, count int) (goArray []CObjInfo) {
+	p := uintptr(cArray)
+
+	for i := 0; i < count; i++ {
+		j := *(*CObjInfo)(unsafe.Pointer(p))
+		goArray = append(goArray, j)
+		p += unsafe.Sizeof(j)
+	}
+	return
+}
+
+// CRECTArrayToGoArray converts a C array of cRECT into a Go slice.
+func CRECTArrayToGoArray(cArray unsafe.Pointer, count int) (goArray []CRECT) {
+	p := uintptr(cArray)
+
+	for i := 0; i < count; i++ {
+		j := *(*CRECT)(unsafe.Pointer(p))
+		goArray = append(goArray, j)
+		p += unsafe.Sizeof(j)
+	}
+	return
+}
+
+// CFaceInfoArrayToGoArray converts a C array of cFaceInfo into a Go slice.
+func CFaceInfoArrayToGoArray(cArray unsafe.Pointer, count int) (goArray []CFaceInfo) {
+	p := uintptr(cArray)
+
+	for i := 0; i < count; i++ {
+		j := *(*CFaceInfo)(unsafe.Pointer(p))
+		goArray = append(goArray, j)
+		p += unsafe.Sizeof(j)
+	}
+	return
+}
diff --git a/gosdk/cgodefs.go b/gosdk/cgodefs.go
new file mode 100644
index 0000000..57a1d9c
--- /dev/null
+++ b/gosdk/cgodefs.go
@@ -0,0 +1,19 @@
+// +build ignore
+
+package gosdk
+
+//go:generate go tool cgo -godefs cgodefs.go
+
+/*
+#include "csdk_struct.h"
+*/
+import "C"
+
+type CPOINT C.cPOINT
+type CRECT C.cRECT
+type CIMAGE C.cIMAGE
+type CFaceAngle C.cFaceAngle
+type CThftResult C.cThftResult
+type CFacePos C.cFacePos
+type CFaceInfo C.cFaceInfo
+type CObjInfo C.cObjInfo
diff --git a/gosdk/csdk.cpp b/gosdk/csdk.cpp
new file mode 100644
index 0000000..d61f09f
--- /dev/null
+++ b/gosdk/csdk.cpp
@@ -0,0 +1,98 @@
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+#include "csdk.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "csrc/all.hpp"
+
+using namespace csdk_wrap;
+
+static VecFunc dtors_;
+
+int c_api_face_detector_init(const int tm, const int gi, const int minFaces, const int rollAngle){
+    return init_face_detector(tm, gi, minFaces, rollAngle, dtors_);
+}
+
+int c_api_face_property_init(const int tm){
+    return init_face_property(tm, dtors_);
+}
+
+int c_api_face_extractor_init(const int tm, const int gi){
+    return init_face_extractor(tm, gi, dtors_);
+}
+
+int c_api_face_tracker_init(const int tm, const int gi, const int wid, const int hei,
+                              const int maxFaces, const int detinterval, const int sampleSize){
+
+   return init_face_tracker(tm, gi, wid, hei, maxFaces, detinterval, sampleSize, dtors_);
+}
+
+YoloHandle c_api_yolo_init(
+    const char *fcfg, const char *fweights, const char *fname,
+    const int gpu_index){
+
+    return init_yolo_detector(fcfg, fweights, fname, gpu_index, dtors_);    
+}
+
+void c_api_release(){
+    for(auto &i : dtors_){
+        i();
+    }
+}
+
+////////////////////////////////////////////////
+
+cFacePos* c_api_face_detect(int *faceCount, uchar*data, const int w, const int h, const int channel){
+    const cIMAGE img{data, w, h, 3};
+    return face_detect(faceCount, &img, channel);
+}
+
+cThftResult c_api_face_property(const cFacePos* pos, uchar*data, const int w, const int h, const int channel){
+
+    const cIMAGE img{data, w, h, 3};
+    return face_property(*pos, &img, channel);
+}
+
+uchar* c_api_face_extract(int *featLen, const cFacePos* pos, uchar*data, const int w, const int h, const int channel){
+
+    const cIMAGE img{data, w, h, 3};
+    return face_extract_feature(featLen, *pos, &img, channel);
+}
+
+float c_api_face_compare(uchar *feat1, uchar *feat2){
+    return face_compare(feat1, feat2);
+}
+
+cRECT* c_api_face_track_only(int *fCount, uchar *data, const int wid, const int hei, const int channel){
+    const cIMAGE img{data, wid, hei, 3};
+
+    return face_track_only(fCount, &img, channel);
+}
+
+cFaceInfo* c_api_face_track_detect(int *fCount, uchar *data, const int wid, const int hei, const int channel){
+    const cIMAGE img{data, wid, hei, 3};
+
+    return face_track_detect(fCount, &img, channel);
+}
+
+cFaceInfo* c_api_face_track(int *fCount, uchar *data, const int wid, const int hei, const int channel){
+    const cIMAGE img{data, wid, hei, 3};
+    return face_track(fCount, &img, channel);
+}
+
+
+/// yolo api
+cObjInfo* c_api_yolo_detect(YoloHandle handle, int *objCount, uchar*data, const int w, const int h, const float thrsh, const int use_means){
+
+    const cIMAGE img{data, w, h, 3};
+    return yolo_detect(handle, objCount, &img, thrsh, use_means);
+}
+
+const char* c_api_yolo_obj_name(const int typ){
+    return yolo_obj_name_by_type(typ);
+}
\ No newline at end of file
diff --git a/gosdk/csdk.h b/gosdk/csdk.h
new file mode 100644
index 0000000..82bc8fa
--- /dev/null
+++ b/gosdk/csdk.h
@@ -0,0 +1,49 @@
+#ifndef _c_wrapper_sdk_h_
+#define _c_wrapper_sdk_h_
+
+#ifdef __cplusplus
+extern "C"{
+#endif
+
+#include "csdk_struct.h"
+
+#ifndef uchar 
+typedef unsigned char uchar;
+#endif
+
+typedef void* YoloHandle;
+
+int c_api_face_detector_init(const int tm, const int gi, const int minFaces, const int rollAngle);
+int c_api_face_property_init(const int tm);
+int c_api_face_extractor_init(const int tm, const int gi);
+int c_api_face_tracker_init(const int tm, const int gi, const int wid, const int hei,
+                              const int maxFaces, const int detinterval, const int sampleSize);
+
+YoloHandle c_api_yolo_init(
+    const char *fcfg, const char *fweights, const char *fname,
+    const int gpu_index);
+
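+// Runs every destructor registered by the *_init calls above; call once at shutdown.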
+void c_api_release();
+///////////////////////////////////////////
+
+/// face api
+cFacePos* c_api_face_detect(int *faceCount, uchar*data, const int w, const int h, const int channel);
+cThftResult c_api_face_property(const cFacePos* pos, uchar*data, const int w, const int h, const int channel);
+
+uchar* c_api_face_extract(int *featLen, const cFacePos* pos, uchar*data, const int w, const int h, const int channel);
+float c_api_face_compare(uchar *feat1, uchar *feat2);
+cRECT* c_api_face_track_only(int *fCount, uchar *data, const int wid, const int hei, const int channel);
+cFaceInfo* c_api_face_track_detect(int *fCount, uchar *data, const int wid, const int hei, const int channel);
+cFaceInfo* c_api_face_track(int *fCount, uchar *data, const int wid, const int hei, const int channel);
+
+/// yolo api
+cObjInfo* c_api_yolo_detect(YoloHandle handle, int *objCount, uchar*data, const int w, const int h, const float thrsh, const int use_means);
+const char* c_api_yolo_obj_name(const int typ);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
\ No newline at end of file
diff --git a/gosdk/csdk_struct.h b/gosdk/csdk_struct.h
new file mode 100644
index 0000000..f0952f1
--- /dev/null
+++ b/gosdk/csdk_struct.h
@@ -0,0 +1,72 @@
+#ifndef _c_wrapper_c_structure_h_
+#define _c_wrapper_c_structure_h_
+
+#include <string.h>
+
+typedef struct _cPOINT {
+    int x;
+    int y;
+} cPOINT;
+
+typedef struct _cRECT {
+    int left;
+    int top;
+    int right;
+    int bottom;
+} cRECT;
+
+typedef struct _cIMAGE{
+    unsigned char *data;
+    int width;
+    int height;
+    int channel;
+} cIMAGE;
+
+typedef struct _cFaceAngle {
+    int yaw;
+    int pitch;
+    int roll;
+    float confidence;
+} cFaceAngle;
+
+typedef struct _cThftResult {
+    int gender;//1-male,0-female
+    int age;//range[0-100]
+    int race; //[1-white,2-yellow,3-black]
+    int beauty_level;//range[0-100]
+    int smile_level;//range[0-100]
+} cThftResult;
+
+typedef struct _cFacePos {
+    cRECT rcFace;
+    cPOINT ptLeftEye;
+    cPOINT ptRightEye;
+    cPOINT ptMouth;
+    cPOINT ptNose;
+    cFaceAngle fAngle;
+    int nQuality;
+    
+    unsigned char pFacialData[512];
+} cFacePos;
+
+typedef struct _cFaceInfo{
+    cRECT rcFace;
+    cPOINT ptLeftEye;
+    cPOINT ptRightEye;
+    cPOINT ptMouth;
+    cPOINT ptNose;
+    cFaceAngle fAngle;
+    int nQuality;
+    
+    unsigned char pFacialData[8*1024];
+    long        nFaceID;//face tracking id
+} cFaceInfo;
+
+typedef struct _cObjInfo
+{
+    cRECT rcObj;
+    int typ;
+    float prob;
+} cObjInfo;
+
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/all.hpp b/gosdk/csrc/all.hpp
new file mode 100644
index 0000000..6c90c4b
--- /dev/null
+++ b/gosdk/csrc/all.hpp
@@ -0,0 +1,11 @@
+#ifndef _c_wrapper_face_detector_all_hpp_
+#define _c_wrapper_face_detector_all_hpp_
+
+#include "buz/face/detector.cpp"
+#include "buz/face/extractor.cpp"
+#include "buz/face/property.cpp"
+#include "buz/face/tracker.cpp"
+
+#include "buz/yolo/detector.cpp"
+
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/base.hpp b/gosdk/csrc/buz/base.hpp
new file mode 100644
index 0000000..c8e6763
--- /dev/null
+++ b/gosdk/csrc/buz/base.hpp
@@ -0,0 +1,9 @@
+#ifndef _c_sdk_wrap_base_class_hpp_
+#define _c_sdk_wrap_base_class_hpp_
+
+#include <vector>
+#include <functional>
+
+using VecFunc = std::vector<std::function<void()> >;
+
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/detector.cpp b/gosdk/csrc/buz/face/detector.cpp
new file mode 100644
index 0000000..399f9f3
--- /dev/null
+++ b/gosdk/csrc/buz/face/detector.cpp
@@ -0,0 +1,60 @@
+#include "detector.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "THFaceImage_i.h"
+
+#include "csdk_struct.h"
+
+namespace csdk_wrap
+{
+    int init_face_detector(const int tm, const int gi, const int minFaces, const int rollAngle,
+                            VecFunc &vec){
+        int ret = 0;
+        if (gi < 0) {
+            THFI_Param *param = new THFI_Param[tm];
+            ret = THFI_Create(tm, param);
+            delete[] param;
+        } else {
+            THFI_Param_Ex *param = new THFI_Param_Ex[tm];
+            THFI_Param detParam;
+            detParam.nMinFaceSize = minFaces;
+            detParam.nRollAngle = rollAngle;
+            for (int i = 0; i < tm; i++) {
+                param[i].tp = detParam;
+                param[i].nDeviceID = gi;
+            }
+            ret = THFI_Create_Ex(tm, param);
+            delete[] param;
+        }
+        if(ret != tm){
+            printf("create face detector failed!\n");
+        }else{
+            vec.emplace_back([]{THFI_Release();});
+        }
+
+        return ret;
+    }
+
+    cFacePos* face_detect(int *faceCount, const cIMAGE *img, const int channel){
+        if(channel < 0 || !img){
+            return NULL;
+        }
+        cFacePos *fpos = NULL;
+        ::THFI_FacePos facesPos[30];
+        int faceNum = THFI_DetectFace(channel, (BYTE*)(img->data), 24, img->width, img->height, facesPos, 30);
+
+        if (faceNum > 0) {
+            fpos =  (cFacePos*)malloc(faceNum * sizeof(cFacePos));
+            *faceCount = faceNum;
+            memcpy(fpos, facesPos, sizeof(THFI_FacePos) * faceNum);
+            
+        }else{
+            printf ("no face detected\n");
+        }
+        return fpos;
+    }
+    
+} // csdk_wrap
diff --git a/gosdk/csrc/buz/face/detector.h b/gosdk/csrc/buz/face/detector.h
new file mode 100644
index 0000000..8a4497d
--- /dev/null
+++ b/gosdk/csrc/buz/face/detector.h
@@ -0,0 +1,18 @@
+#ifndef _c_wrapper_face_detector_hpp_
+#define _c_wrapper_face_detector_hpp_
+
+#include "../base.hpp"
+
+
+struct _cFacePos;
+struct _cIMAGE;
+
+namespace csdk_wrap{
+
+    int init_face_detector(const int tm, const int gi, const int minFaces, const int rollAngle,
+                            VecFunc &vec);
+    cFacePos* face_detect(int *faceCount, const cIMAGE *img, const int channel);
+
+}
+
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/extractor.cpp b/gosdk/csrc/buz/face/extractor.cpp
new file mode 100644
index 0000000..594eb1f
--- /dev/null
+++ b/gosdk/csrc/buz/face/extractor.cpp
@@ -0,0 +1,54 @@
+#include "extractor.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "THFeature_i.h"
+#include "csdk_struct.h"
+
+namespace csdk_wrap{
+
+    int init_face_extractor(const int tm, const int gi, VecFunc &vec){
+        int ret = 0;
+        if (gi < 0) {
+            ret = EF_Init(tm);
+        } else {
+            EF_Param *param = new EF_Param[tm];
+            for (int i = 0; i < tm; i++) {
+                param[i].nDeviceID = gi;
+            }
+            ret = EF_Init_Ex(tm, param);
+            delete[] param;
+        }
+        if(ret != tm){
+            printf("create face extractor failed!\n");
+        }else{
+            vec.emplace_back([]{EF_Release();});
+        }
+        return ret;
+    }
+
+    unsigned char* face_extract_feature(int *featLen, const cFacePos &pos, const cIMAGE *img, const int chan){
+        if(!img){
+            printf("face extract error, image or pos null\n");
+            return NULL;
+        }
+        *featLen = EF_Size();
+        unsigned char *feat = (unsigned char*)malloc(*featLen);
+        auto ret = EF_Extract(chan, (BYTE*)(img->data), img->width, img->height, 3, (THFI_FacePos*)(&pos), feat);
+        
+        if(ret != 1){
+            printf("face extract error %d\n", ret);
+            free(feat);
+            *featLen = 0;
+            return NULL;
+        }
+        return feat;
+    }
+
+    float face_compare(unsigned char *feat1, unsigned char *feat2){
+        if (!feat1 || !feat2){
+            return 0.0f;
+        }
+
+        return EF_Compare(feat1, feat2);
+    }
+
+}
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/extractor.h b/gosdk/csrc/buz/face/extractor.h
new file mode 100644
index 0000000..31bb712
--- /dev/null
+++ b/gosdk/csrc/buz/face/extractor.h
@@ -0,0 +1,14 @@
+#ifndef _c_wrapper_face_extractor_h_
+#define _c_wrapper_face_extractor_h_
+
+#include "../base.hpp"
+
+struct _cFacePos;
+struct _cIMAGE;
+
+namespace csdk_wrap{
+    int init_face_extractor(const int tm, const int gi, VecFunc &vec);
+    unsigned char* face_extract_feature(int *featLen, const cFacePos &pos, const cIMAGE *img, const int chan);
+    float face_compare(unsigned char *feat1, unsigned char *feat2);
+}
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/property.cpp b/gosdk/csrc/buz/face/property.cpp
new file mode 100644
index 0000000..74d4774
--- /dev/null
+++ b/gosdk/csrc/buz/face/property.cpp
@@ -0,0 +1,34 @@
+#include "property.h"
+
+#include <stdio.h>
+
+#include "THFaceProperty_i.h"
+#include "csdk_struct.h"
+
+namespace csdk_wrap{
+
+    int init_face_property(const int tm, VecFunc &vec){
+        auto ret = THFP_Create(tm);
+        if(ret != tm){
+            printf("create face property error\n");
+        }else{
+            vec.emplace_back([]{THFP_Release();});
+        }
+        return ret;
+    }
+
+    cThftResult face_property(const cFacePos &pos, const cIMAGE *img, const int chan){
+        cThftResult result;
+        result.gender = result.age = result.race = -1;
+
+        auto ret = THFP_Execute_V2(chan, (BYTE*)(img->data), img->width, img->height,
+            (THFI_FacePos*)(&pos), (THFP_Result_V2*)(&result));
+        if(ret == 0){
+            // printf("property face gender %s, age %d, race %s, beauty level %d, smile_level %d\n", 
+            // result.gender ?"male":"female",
+            // result.age,
+            // result.race==2?"yello":"other",
+            // result.beauty_level, result.smile_level);
+        }
+        return result;
+    }
+
+}
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/property.h b/gosdk/csrc/buz/face/property.h
new file mode 100644
index 0000000..39d3976
--- /dev/null
+++ b/gosdk/csrc/buz/face/property.h
@@ -0,0 +1,14 @@
+#ifndef _c_wrapper_face_property_h_
+#define _c_wrapper_face_property_h_
+
+#include "../base.hpp"
+
+struct _cThftResult;
+struct _cFacePos;
+struct _cIMAGE;
+
+namespace csdk_wrap{
+    int init_face_property(const int tm, VecFunc &vec);
+    cThftResult face_property(const cFacePos &pos, const cIMAGE *img, const int chan);
+}
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/tracker.cpp b/gosdk/csrc/buz/face/tracker.cpp
new file mode 100644
index 0000000..b0cd03b
--- /dev/null
+++ b/gosdk/csrc/buz/face/tracker.cpp
@@ -0,0 +1,71 @@
+#include "tracker.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "THFaceTracking_i.h"
+
+namespace csdk_wrap{
+    static THFT_Param param;
+    int init_face_tracker(const int tm, const int gi,const int w, const int h,
+                    const int maxFaces, const int detinterval,  const int sampleSize,
+                              VecFunc &vec){
+        param.nDeviceID = gi;
+        param.nImageWidth = w;
+        param.nImageHeight = h;
+        param.nMaxFaceNum = maxFaces;
+        param.nSampleSize = sampleSize > 0 ? sampleSize : w/2;
+        param.nDetectionIntervalFrame = detinterval;
+
+        auto nNum = THFT_Create(tm, &param);
+        if(nNum != tm){
+            printf("create face tracker failed!\n");
+        }else{
+            vec.emplace_back([]{THFI_Release();});
+        }
+        return nNum;
+    }
+
+    cRECT* face_track_only(int *faceCount, const cIMAGE *img, const int chan){
+
+        *faceCount = 0;
+
+        cRECT *pFaces = (cRECT*)malloc(param.nMaxFaceNum * sizeof(cRECT));
+        auto nNum = THFT_FaceOnly(chan, img->data, img->width, img->height, (tagRECT*)pFaces, param.nMaxFaceNum, param.nSampleSize);
+        if (nNum > 0)
+        {
+            *faceCount = nNum;
+        }else{
+            free(pFaces);
+            pFaces = NULL;
+        }
+        return pFaces;
+    }
+
+    cFaceInfo* face_track_detect(int *faceCount, const cIMAGE *img, const int chan){
+        *faceCount = 0;
+
+        cFaceInfo* pFaceInfos = (cFaceInfo*)malloc(param.nMaxFaceNum * sizeof(cFaceInfo));
+        auto nNum = THFT_FaceDetect(chan, img->data, img->width, img->height, (THFT_FaceInfo*)pFaceInfos, param.nMaxFaceNum, param.nSampleSize);
+        if (nNum > 0){
+            *faceCount = nNum;
+        }else{
+            free(pFaceInfos);
+            pFaceInfos = NULL;
+        }
+        return pFaceInfos;
+    }
+
+    cFaceInfo* face_track(int *faceCount, const cIMAGE *img, const int chan){
+        *faceCount = 0;
+
+        cFaceInfo* pFaceInfos = (cFaceInfo*)malloc(param.nMaxFaceNum * sizeof(cFaceInfo));
+        auto nNum = THFT_FaceTracking(chan, img->data, (THFT_FaceInfo*)pFaceInfos);
+        if (nNum > 0){
+            *faceCount = nNum;
+        }else{
+            free(pFaceInfos);
+            pFaceInfos = NULL;
+        }
+        return pFaceInfos;
+    }
+
+}
\ No newline at end of file
diff --git a/gosdk/csrc/buz/face/tracker.h b/gosdk/csrc/buz/face/tracker.h
new file mode 100644
index 0000000..1d71ba1
--- /dev/null
+++ b/gosdk/csrc/buz/face/tracker.h
@@ -0,0 +1,19 @@
+#ifndef _c_wrapper_face_tracker_h_
+#define _c_wrapper_face_tracker_h_
+
+#include "../base.hpp"
+
+struct _cRECT;
+struct _cFaceInfo;
+struct _cIMAGE;
+
+namespace csdk_wrap{
+    int init_face_tracker(const int tm, const int gi,const int w, const int h,
+                              const int maxFaces, const int detinterval, const int sampleSize,
+                              VecFunc &vec);
+    cRECT* face_track_only(int *faceCount, const cIMAGE *img, const int chan);
+    cFaceInfo* face_track_detect(int *faceCount, const cIMAGE *img, const int chan);
+
+    cFaceInfo* face_track(int *faceCount, const cIMAGE *img, const int chan);
+}
+#endif
\ No newline at end of file
diff --git a/gosdk/csrc/buz/yolo/detector.cpp b/gosdk/csrc/buz/yolo/detector.cpp
new file mode 100644
index 0000000..1ca2bfe
--- /dev/null
+++ b/gosdk/csrc/buz/yolo/detector.cpp
@@ -0,0 +1,139 @@
+#include "detector.h"
+
+#include <stdlib.h>
+#include <fstream>
+
+#include <sys/time.h>
+
+#include "csdk_struct.h"
+
+#include "yolo.hpp"
+
+namespace csdk_wrap{
+
+    static std::vector<std::string> names;
+
+    static void objects_names_from_file(std::string filename) {
+        std::ifstream file(filename);
+
+        if (!file.is_open()){
+            printf("failed to open names file %s\n", filename.c_str());
+            return;
+        }
+        for(std::string line; getline(file, line);) names.push_back(line);
+
+        printf("names count %zu\n", names.size());
+    }
+
+    void* init_yolo_detector(const char *cfg, const char *weights, const char *name,
+                    const int gpu_index, VecFunc &vec){
+
+        if(!cfg || !weights || !name){
+            printf("init Detector error\n");
+            return NULL;
+        }
+    
+        if(names.empty())
+            objects_names_from_file(name);
+
+        auto det = new Detector(cfg, weights, gpu_index);
+        vec.emplace_back([det]{delete det;});
+
+        return det;
+    }
+
+    image_t* buffer_to_image(const unsigned char *data, const int w, const int h, const int color_channel){
+        int size = w*h;
+        int size2 = size*2;
+    
+        int c = color_channel;
+        image_t *img = new image_t;
+        img->h = h;
+        img->w = w;
+        img->c = color_channel;
+        img->data = (float*)calloc(h*w*c, sizeof(float));
+        // image im = make_image(w, h, c);
+        const unsigned char *srcData = data;
+    
+        int count = 0;
+        switch(c){
+            case 1:{
+                for (; count < size; ){
+                    img->data[count] = 
+                    img->data[w*h + count] = 
+                    img->data[w*h*2 + count] = 
+                    (float)(srcData[count])/255.0;
+    
+                    ++count;
+                }
+                break;
+            }
+            case 3:{
+                float* desData = img->data;
+    
+                for(int i = 0;i<size;i++){
+                    *(desData) = *(srcData + 2) /255.0f;
+                    *(desData+size) = *(srcData + 1) /255.0f;
+                    *(desData+size2) = *(srcData) /255.0f;
+    
+                    desData++;
+                    srcData+=3;
+                }
+                break;
+            }
+    
+            default:
+                printf("Channel number not supported.\n");
+                break;
+        }
+
+	    return img;
+    }
+
+    cObjInfo* yolo_detect(void *handle,int *objCount, const cIMAGE *img,  const float thrsh, const bool use_mean){
+        Detector *det = (Detector*)handle;
+
+        const int color_channel = img->channel;
+        image_t* im = buffer_to_image(img->data, img->width, img->height, color_channel);
+
+        // struct timeval b,e;
+        // gettimeofday(&b, NULL);
+        
+        std::vector<bbox_t> result_vec = det->detect(*im, thrsh, use_mean);
+		det->free_image(*im);
+        delete im;
+
+        // gettimeofday(&e,NULL);
+        // double t = e.tv_sec*1000.0 + e.tv_usec/1000.0 - b.tv_sec*1000.0-b.tv_usec/1000.0;
+        // printf("lib yolo detect use time %f ms\n", t);
+
+        cObjInfo *infos = NULL;
+        if(!result_vec.empty()){
+            infos = (cObjInfo*)malloc(result_vec.size() * sizeof(cObjInfo));
+        }
+
+        int count = 0;
+        for(auto &i : result_vec){
+            
+            cObjInfo info;
+            info.typ = i.obj_id;
+            info.prob = i.prob;
+            info.rcObj.left = i.x;
+			info.rcObj.top = i.y;
+			info.rcObj.right = i.x+i.w;
+            info.rcObj.bottom = i.y+i.h;
+            
+            infos[count++] = info;
+        }
+        
+        *objCount = count;
+
+        return infos;
+    }
+
+    const char* yolo_obj_name_by_type(const int typ){
+        if(names.empty() || typ < 0 || typ >= names.size()) return NULL;
+        return names.at(typ).c_str();
+    }
+}
+
diff --git a/gosdk/csrc/buz/yolo/detector.h b/gosdk/csrc/buz/yolo/detector.h
new file mode 100644
index 0000000..1f263b8
--- /dev/null
+++ b/gosdk/csrc/buz/yolo/detector.h
@@ -0,0 +1,21 @@
+#ifndef _c_wrapper_yolo_detector_h_
+#define _c_wrapper_yolo_detector_h_
+
+#include "../base.hpp"
+
+struct _cObjInfo;
+struct _cIMAGE;
+
+struct image_t;
+
+
+namespace csdk_wrap{
+    void* init_yolo_detector(const char *cfg, const char *weights, const char *name,
+                    const int gpu_index, VecFunc &vec);
+    image_t* buffer_to_image(const unsigned char *data, const int w, const int h, const int color_channel);
+    cObjInfo* yolo_detect(void *handle,int *objCount, const cIMAGE *img, const float thrsh, const bool use_mean);
+
+    const char* yolo_obj_name_by_type(const int typ);
+}
+
+#endif
\ No newline at end of file
diff --git a/gosdk/go2c.go b/gosdk/go2c.go
new file mode 100644
index 0000000..b4c6274
--- /dev/null
+++ b/gosdk/go2c.go
@@ -0,0 +1,61 @@
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs cgodefs.go
+
+package gosdk
+
+type CPOINT struct {
+	X	int32
+	Y	int32
+}
+type CRECT struct {
+	Left	int32
+	Top	int32
+	Right	int32
+	Bottom	int32
+}
+type CIMAGE struct {
+	Data		*uint8
+	Width		int32
+	Height		int32
+	Channel		int32
+	Pad_cgo_0	[4]byte
+}
+type CFaceAngle struct {
+	Yaw		int32
+	Pitch		int32
+	Roll		int32
+	Confidence	float32
+}
+type CThftResult struct {
+	Gender		int32
+	Age		int32
+	Race		int32
+	Beauty_level	int32
+	Smile_level	int32
+}
+type CFacePos struct {
+	RcFace		CRECT
+	PtLeftEye	CPOINT
+	PtRightEye	CPOINT
+	PtMouth		CPOINT
+	PtNose		CPOINT
+	FAngle		CFaceAngle
+	NQuality	int32
+	PFacialData	[512]uint8
+}
+type CFaceInfo struct {
+	RcFace		CRECT
+	PtLeftEye	CPOINT
+	PtRightEye	CPOINT
+	PtMouth		CPOINT
+	PtNose		CPOINT
+	FAngle		CFaceAngle
+	NQuality	int32
+	PFacialData	[8192]uint8
+	NFaceID		int64
+}
+type CObjInfo struct {
+	RcObj	CRECT
+	Typ	int32
+	Prob	float32
+}
diff --git a/gosdk/gosdk.go b/gosdk/gosdk.go
new file mode 100644
index 0000000..240cfdb
--- /dev/null
+++ b/gosdk/gosdk.go
@@ -0,0 +1,238 @@
+package gosdk
+
+/*
+#cgo CFLAGS: -I. -I./sdk/face/include -I./sdk/darknet/include -I/usr/local/cuda/include -w -g
+#cgo CXXFLAGS: -I. -I./sdk/face/include -I./sdk/darknet/include -I/usr/local/cuda/include -w -g -std=c++11
+#cgo LDFLAGS: -L/usr/local/cuda/lib64 -L${SRCDIR}/sdk/face/lib/gpu -L${SRCDIR}/sdk/darknet/lib
+#cgo LDFLAGS: -Wl,-rpath,${SRCDIR}/sdk/face/lib/gpu:${SRCDIR}/sdk/darknet/lib
+#cgo LDFLAGS: -ldarknet -lTHFaceImage -lTHFeature -lTHFaceProperty -lTHFaceTracking -lcudart -lcublas -lcurand -lrt -ldl -lpthread
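+// NOTE: the flags above assume the vendored face SDK libraries live in
+// sdk/face/lib/gpu, darknet in sdk/darknet/lib, and CUDA in /usr/local/cuda.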
+#include <stdlib.h>
+#include "csdk.h"
+*/
+import "C"
+import (
+	"unsafe"
+)
+
+// YoloHandle wrap C
+type YoloHandle struct {
+	handle C.YoloHandle
+}
+
+// SDKImage holds a packed pixel buffer and its dimensions; the C wrappers treat Data as 3-channel image data.
+type SDKImage struct {
+	Data   []byte
+	Width  int
+	Height int
+}
+
+// InitYolo initializes the YOLO detector SDK and returns its handle.
+func InitYolo(fc, fw, fn string, gi int) *YoloHandle {
+
+	c := C.CString(fc)
+	defer C.free(unsafe.Pointer(c))
+	w := C.CString(fw)
+	defer C.free(unsafe.Pointer(w))
+	n := C.CString(fn)
+	defer C.free(unsafe.Pointer(n))
+
+	g := C.int(gi)
+
+	p := C.c_api_yolo_init(c, w, n, g)
+	return &YoloHandle{p}
+}
+
+// InitFaceDetector initializes the face detector.
+func InitFaceDetector(tm, gi, minFaces, rollAngle int) {
+
+	C.c_api_face_detector_init(C.int(tm), C.int(gi), C.int(minFaces), C.int(rollAngle))
+}
+
+// InitFaceProperty initializes the face property analyzer.
+func InitFaceProperty(tm int) {
+
+	C.c_api_face_property_init(C.int(tm))
+}
+
+// InitFaceExtractor initializes the face feature extractor.
+func InitFaceExtractor(tm, gi int) {
+	C.c_api_face_extractor_init(C.int(tm), C.int(gi))
+}
+
+// InitFaceTracker initializes the face tracker.
+func InitFaceTracker(tm, gi, w, h, maxFaces, interval, sample int) {
+
+	C.c_api_face_tracker_init(C.int(tm), C.int(gi), C.int(w), C.int(h), C.int(maxFaces), C.int(interval), C.int(sample))
+}
+
+// Free runs all registered SDK release routines.
+func Free() {
+	C.c_api_release()
+}
+
+// FaceDetect detects faces in img and returns their positions.
+func FaceDetect(img SDKImage, ch int) []CFacePos {
+
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var count C.int
+	cfpos := C.c_api_face_detect(&count, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+	if cfpos != nil {
+		defer C.free(unsafe.Pointer(cfpos))
+		return CFacePosArrayToGoArray(unsafe.Pointer(cfpos), int(count))
+	}
+	return nil
+}
+
+// FaceProperty estimates attributes (gender, age, race, etc.) for the given face position.
+func FaceProperty(fpos CFacePos, img SDKImage, ch int) CThftResult {
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	pos := (*C.cFacePos)(unsafe.Pointer(&fpos))
+	result := C.c_api_face_property(pos, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+
+	return *((*CThftResult)(unsafe.Pointer(&result)))
+}
+
+// FaceExtract extracts the feature vector of the given face.
+func FaceExtract(fpos CFacePos, img SDKImage, ch int) []byte {
+
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var featLen C.int
+	pos := (*C.cFacePos)(unsafe.Pointer(&fpos))
+
+	p := C.c_api_face_extract(&featLen, pos, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+	if p == nil {
+		return nil
+	}
+	defer C.free(unsafe.Pointer(p))
+	return C.GoBytes(unsafe.Pointer(p), featLen)
+}
+
+// FaceCompare compares two feature vectors and returns their similarity score.
+func FaceCompare(feat1 []byte, feat2 []byte) float32 {
+	res := C.c_api_face_compare((*C.uchar)(unsafe.Pointer(&feat1[0])), (*C.uchar)(unsafe.Pointer(&feat2[0])))
+	return float32(res)
+}
+
+// FaceTrackOnly returns only the face rectangles found by the tracker.
+func FaceTrackOnly(img SDKImage, ch int) []CRECT {
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var fCount C.int
+	rect := C.c_api_face_track_only(&fCount, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+	if rect != nil {
+		defer C.free(unsafe.Pointer(rect))
+		return CRECTArrayToGoArray(unsafe.Pointer(rect), int(fCount))
+	}
+	return nil
+}
+
+// FaceTrackDetect runs the tracker's face detection and returns full face information.
+func FaceTrackDetect(img SDKImage, ch int) []CFaceInfo {
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var fCount C.int
+	finfo := C.c_api_face_track_detect(&fCount, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+	if finfo != nil {
+		defer C.free(unsafe.Pointer(finfo))
+		return CFaceInfoArrayToGoArray(unsafe.Pointer(finfo), int(fCount))
+	}
+	return nil
+}
+
+// FaceTrackingInfo holds the tracking state and history for one face ID.
+type FaceTrackingInfo struct {
+	tracking bool
+	faces    []CFaceInfo
+}
+
+// FaceTrack updates res with the faces currently tracked in img, keyed by face ID.
+func FaceTrack(res map[int64]FaceTrackingInfo, img SDKImage, ch int) {
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var fCount C.int
+	cFinfo := C.c_api_face_track(&fCount, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.int(ch))
+	if cFinfo == nil {
+		return
+	}
+	defer C.free(unsafe.Pointer(cFinfo))
+	goFinfo := CFaceInfoArrayToGoArray(unsafe.Pointer(cFinfo), int(fCount))
+
+	// Result map is empty: start a new track for every detected face.
+	if len(res) == 0 {
+		for _, v := range goFinfo {
+			var faces []CFaceInfo
+			faces = append(faces, v)
+			tInfo := FaceTrackingInfo{true, faces}
+			res[v.NFaceID] = tInfo
+		}
+		// Result map is not empty: update the existing tracks.
+	} else {
+		// Mark every known face as not tracked by default; FaceTrackingInfo is
+		// stored by value, so the modified copy must be written back.
+		for k, v := range res {
+			v.tracking = false
+			res[k] = v
+		}
+		// Check whether each detected face already has a track.
+		for _, v := range goFinfo {
+			// Existing track: the face is still being tracked.
+			if info, ok := res[v.NFaceID]; ok {
+				info.tracking = true
+				// Cap the per-face history at 100 entries.
+				if len(info.faces) > 100 {
+					info.faces = info.faces[1:]
+				}
+				info.faces = append(info.faces, v)
+				res[v.NFaceID] = info
+				// Unknown face: add a new track.
+			} else {
+				var faces []CFaceInfo
+				faces = append(faces, v)
+				tInfo := FaceTrackingInfo{true, faces}
+				res[v.NFaceID] = tInfo
+			}
+		}
+		// Delete faces that are no longer being tracked.
+		for k, v := range res {
+			if !v.tracking {
+				delete(res, k)
+			}
+		}
+	}
+}
+
+// YoloDetect runs YOLO object detection on img.
+func YoloDetect(handle *YoloHandle, img SDKImage, thrsh float32, umns int) []CObjInfo {
+
+	data := img.Data
+	w := img.Width
+	h := img.Height
+
+	var count C.int
+
+	cobjinfo := C.c_api_yolo_detect(handle.handle, &count, (*C.uchar)(unsafe.Pointer(&data[0])), C.int(w), C.int(h), C.float(thrsh), C.int(umns))
+
+	if cobjinfo != nil {
+		defer C.free(unsafe.Pointer(cobjinfo))
+		return CYoloObjInfoArrayToGoArray(unsafe.Pointer(cobjinfo), int(count))
+	}
+	return nil
+}
+
+// YoloObjName returns the object class name for the given type index.
+func YoloObjName(typ int) string {
+	p := C.c_api_yolo_obj_name(C.int(typ))
+
+	return C.GoString(p)
+}
diff --git a/gosdk/sdk/darknet/include/darknet.h b/gosdk/sdk/darknet/include/darknet.h
new file mode 100755
index 0000000..c3f5628
--- /dev/null
+++ b/gosdk/sdk/darknet/include/darknet.h
@@ -0,0 +1,812 @@
+#ifndef DARKNET_API
+#define DARKNET_API
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#define SECRET_NUM -1234
+extern int gpu_index;
+
+#ifdef GPU
+    #define BLOCK 512
+
+    #include "cuda_runtime.h"
+    #include "curand.h"
+    #include "cublas_v2.h"
+
+    #ifdef CUDNN
+    #include "cudnn.h"
+    #endif
+#endif
+
+#ifndef __cplusplus
+    #ifdef OPENCV
+    #include "opencv2/highgui/highgui_c.h"
+    #include "opencv2/imgproc/imgproc_c.h"
+    #include "opencv2/core/version.hpp"
+    #if CV_MAJOR_VERSION == 3
+    #include "opencv2/videoio/videoio_c.h"
+    #include "opencv2/imgcodecs/imgcodecs_c.h"
+    #endif
+    #endif
+#endif
+
+typedef struct{
+    int classes;
+    char **names;
+} metadata;
+
+metadata get_metadata(char *file);
+
+typedef struct{
+    int *leaf;
+    int n;
+    int *parent;
+    int *child;
+    int *group;
+    char **name;
+
+    int groups;
+    int *group_size;
+    int *group_offset;
+} tree;
+tree *read_tree(char *filename);
+
+typedef enum{
+    LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
+} ACTIVATION;
+
+typedef enum{
+    MULT, ADD, SUB, DIV
+} BINARY_ACTIVATION;
+
+typedef enum {
+    CONVOLUTIONAL,
+    DECONVOLUTIONAL,
+    CONNECTED,
+    MAXPOOL,
+    SOFTMAX,
+    DETECTION,
+    DROPOUT,
+    CROP,
+    ROUTE,
+    COST,
+    NORMALIZATION,
+    AVGPOOL,
+    LOCAL,
+    SHORTCUT,
+    ACTIVE,
+    RNN,
+    GRU,
+    LSTM,
+    CRNN,
+    BATCHNORM,
+    NETWORK,
+    XNOR,
+    REGION,
+    YOLO,
+    REORG,
+    UPSAMPLE,
+    LOGXENT,
+    L2NORM,
+    BLANK
+} LAYER_TYPE;
+
+typedef enum{
+    SSE, MASKED, L1, SEG, SMOOTH,WGAN
+} COST_TYPE;
+
+typedef struct{
+    int batch;
+    float learning_rate;
+    float momentum;
+    float decay;
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+    int t;
+} update_args;
+
+struct network;
+typedef struct network network;
+
+struct layer;
+typedef struct layer layer;
+
+struct layer{
+    LAYER_TYPE type;
+    ACTIVATION activation;
+    COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network);
+    void (*backward)  (struct layer, struct network);
+    void (*update)    (struct layer, update_args);
+    void (*forward_gpu)   (struct layer, struct network);
+    void (*backward_gpu)  (struct layer, struct network);
+    void (*update_gpu)    (struct layer, update_args);
+    int batch_normalize;
+    int shortcut;
+    int batch;
+    int forced;
+    int flipped;
+    int inputs;
+    int outputs;
+    int nweights;
+    int nbiases;
+    int extra;
+    int truths;
+    int h,w,c;
+    int out_h, out_w, out_c;
+    int n;
+    int max_boxes;
+    int groups;
+    int size;
+    int side;
+    int stride;
+    int reverse;
+    int flatten;
+    int spatial;
+    int pad;
+    int sqrt;
+    int flip;
+    int index;
+    int binary;
+    int xnor;
+    int steps;
+    int hidden;
+    int truth;
+    float smooth;
+    float dot;
+    float angle;
+    float jitter;
+    float saturation;
+    float exposure;
+    float shift;
+    float ratio;
+    float learning_rate_scale;
+    float clip;
+    int softmax;
+    int classes;
+    int coords;
+    int background;
+    int rescore;
+    int objectness;
+    int joint;
+    int noadjust;
+    int reorg;
+    int log;
+    int tanh;
+    int *mask;
+    int total;
+
+    float alpha;
+    float beta;
+    float kappa;
+
+    float coord_scale;
+    float object_scale;
+    float noobject_scale;
+    float mask_scale;
+    float class_scale;
+    int bias_match;
+    int random;
+    float ignore_thresh;
+    float truth_thresh;
+    float thresh;
+    float focus;
+    int classfix;
+    int absolute;
+
+    int onlyforward;
+    int stopbackward;
+    int dontload;
+    int dontsave;
+    int dontloadscales;
+
+    float temperature;
+    float probability;
+    float scale;
+
+    char  * cweights;
+    int   * indexes;
+    int   * input_layers;
+    int   * input_sizes;
+    int   * map;
+    float * rand;
+    float * cost;
+    float * state;
+    float * prev_state;
+    float * forgot_state;
+    float * forgot_delta;
+    float * state_delta;
+    float * combine_cpu;
+    float * combine_delta_cpu;
+
+    float * concat;
+    float * concat_delta;
+
+    float * binary_weights;
+
+    float * biases;
+    float * bias_updates;
+
+    float * scales;
+    float * scale_updates;
+
+    float * weights;
+    float * weight_updates;
+
+    float * delta;
+    float * output;
+    float * loss;
+    float * squared;
+    float * norms;
+
+    float * spatial_mean;
+    float * mean;
+    float * variance;
+
+    float * mean_delta;
+    float * variance_delta;
+
+    float * rolling_mean;
+    float * rolling_variance;
+
+    float * x;
+    float * x_norm;
+
+    float * m;
+    float * v;
+    
+    float * bias_m;
+    float * bias_v;
+    float * scale_m;
+    float * scale_v;
+
+
+    float *z_cpu;
+    float *r_cpu;
+    float *h_cpu;
+    float * prev_state_cpu;
+
+    float *temp_cpu;
+    float *temp2_cpu;
+    float *temp3_cpu;
+
+    float *dh_cpu;
+    float *hh_cpu;
+    float *prev_cell_cpu;
+    float *cell_cpu;
+    float *f_cpu;
+    float *i_cpu;
+    float *g_cpu;
+    float *o_cpu;
+    float *c_cpu;
+    float *dc_cpu; 
+
+    float * binary_input;
+
+    struct layer *input_layer;
+    struct layer *self_layer;
+    struct layer *output_layer;
+
+    struct layer *reset_layer;
+    struct layer *update_layer;
+    struct layer *state_layer;
+
+    struct layer *input_gate_layer;
+    struct layer *state_gate_layer;
+    struct layer *input_save_layer;
+    struct layer *state_save_layer;
+    struct layer *input_state_layer;
+    struct layer *state_state_layer;
+
+    struct layer *input_z_layer;
+    struct layer *state_z_layer;
+
+    struct layer *input_r_layer;
+    struct layer *state_r_layer;
+
+    struct layer *input_h_layer;
+    struct layer *state_h_layer;
+	
+    struct layer *wz;
+    struct layer *uz;
+    struct layer *wr;
+    struct layer *ur;
+    struct layer *wh;
+    struct layer *uh;
+    struct layer *uo;
+    struct layer *wo;
+    struct layer *uf;
+    struct layer *wf;
+    struct layer *ui;
+    struct layer *wi;
+    struct layer *ug;
+    struct layer *wg;
+
+    tree *softmax_tree;
+
+    size_t workspace_size;
+
+#ifdef GPU
+    int *indexes_gpu;
+
+    float *z_gpu;
+    float *r_gpu;
+    float *h_gpu;
+
+    float *temp_gpu;
+    float *temp2_gpu;
+    float *temp3_gpu;
+
+    float *dh_gpu;
+    float *hh_gpu;
+    float *prev_cell_gpu;
+    float *cell_gpu;
+    float *f_gpu;
+    float *i_gpu;
+    float *g_gpu;
+    float *o_gpu;
+    float *c_gpu;
+    float *dc_gpu; 
+
+    float *m_gpu;
+    float *v_gpu;
+    float *bias_m_gpu;
+    float *scale_m_gpu;
+    float *bias_v_gpu;
+    float *scale_v_gpu;
+
+    float * combine_gpu;
+    float * combine_delta_gpu;
+
+    float * prev_state_gpu;
+    float * forgot_state_gpu;
+    float * forgot_delta_gpu;
+    float * state_gpu;
+    float * state_delta_gpu;
+    float * gate_gpu;
+    float * gate_delta_gpu;
+    float * save_gpu;
+    float * save_delta_gpu;
+    float * concat_gpu;
+    float * concat_delta_gpu;
+
+    float * binary_input_gpu;
+    float * binary_weights_gpu;
+
+    float * mean_gpu;
+    float * variance_gpu;
+
+    float * rolling_mean_gpu;
+    float * rolling_variance_gpu;
+
+    float * variance_delta_gpu;
+    float * mean_delta_gpu;
+
+    float * x_gpu;
+    float * x_norm_gpu;
+    float * weights_gpu;
+    float * weight_updates_gpu;
+    float * weight_change_gpu;
+
+    float * biases_gpu;
+    float * bias_updates_gpu;
+    float * bias_change_gpu;
+
+    float * scales_gpu;
+    float * scale_updates_gpu;
+    float * scale_change_gpu;
+
+    float * output_gpu;
+    float * loss_gpu;
+    float * delta_gpu;
+    float * rand_gpu;
+    float * squared_gpu;
+    float * norms_gpu;
+#ifdef CUDNN
+    cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+    cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+    cudnnTensorDescriptor_t normTensorDesc;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
+    cudnnConvolutionDescriptor_t convDesc;
+    cudnnConvolutionFwdAlgo_t fw_algo;
+    cudnnConvolutionBwdDataAlgo_t bd_algo;
+    cudnnConvolutionBwdFilterAlgo_t bf_algo;
+#endif
+#endif
+};
+
+void free_layer(layer);
+
+typedef enum {
+    CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
+} learning_rate_policy;
+
+typedef struct network{
+    int n;
+    int batch;
+    size_t *seen;
+    int *t;
+    float epoch;
+    int subdivisions;
+    layer *layers;
+    float *output;
+    learning_rate_policy policy;
+
+    float learning_rate;
+    float momentum;
+    float decay;
+    float gamma;
+    float scale;
+    float power;
+    int time_steps;
+    int step;
+    int max_batches;
+    float *scales;
+    int   *steps;
+    int num_steps;
+    int burn_in;
+
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+
+    int inputs;
+    int outputs;
+    int truths;
+    int notruth;
+    int h, w, c;
+    int max_crop;
+    int min_crop;
+    float max_ratio;
+    float min_ratio;
+    int center;
+    float angle;
+    float aspect;
+    float exposure;
+    float saturation;
+    float hue;
+    int random;
+
+    int gpu_index;
+    tree *hierarchy;
+
+    float *input;
+    float *truth;
+    float *delta;
+    float *workspace;
+    int train;
+    int index;
+    float *cost;
+    float clip;
+
+#ifdef GPU
+    float *input_gpu;
+    float *truth_gpu;
+    float *delta_gpu;
+    float *output_gpu;
+#endif
+
+} network;
+
+typedef struct {
+    int w;
+    int h;
+    float scale;
+    float rad;
+    float dx;
+    float dy;
+    float aspect;
+} augment_args;
+
+typedef struct {
+    int w;
+    int h;
+    int c;
+    float *data;
+} image;
+
+typedef struct{
+    float x, y, w, h;
+} box;
+
+typedef struct detection{
+    box bbox;
+    int classes;
+    float *prob;
+    float *mask;
+    float objectness;
+    int sort_class;
+} detection;
+
+typedef struct matrix{
+    int rows, cols;
+    float **vals;
+} matrix;
+
+
+typedef struct{
+    int w, h;
+    matrix X;
+    matrix y;
+    int shallow;
+    int *num_boxes;
+    box **boxes;
+} data;
+
+typedef enum {
+    CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
+} data_type;
+
+typedef struct load_args{
+    int threads;
+    char **paths;
+    char *path;
+    int n;
+    int m;
+    char **labels;
+    int h;
+    int w;
+    int out_w;
+    int out_h;
+    int nh;
+    int nw;
+    int num_boxes;
+    int min, max, size;
+    int classes;
+    int background;
+    int scale;
+    int center;
+    int coords;
+    float jitter;
+    float angle;
+    float aspect;
+    float saturation;
+    float exposure;
+    float hue;
+    data *d;
+    image *im;
+    image *resized;
+    data_type type;
+    tree *hierarchy;
+} load_args;
+
+typedef struct{
+    int id;
+    float x,y,w,h;
+    float left, right, top, bottom;
+} box_label;
+
+
+void free_data(data d);
+
+typedef struct node{
+    void *val;
+    struct node *next;
+    struct node *prev;
+} node;
+
+typedef struct list{
+    int size;
+    node *front;
+    node *back;
+} list;
+
+#ifdef __cplusplus
+extern "C" {               // use C linkage for the following declarations
+#endif
+
+//#todo
+network *load_network(char *cfg, char *weights, int clear);
+load_args get_base_args(network *net);
+
+pthread_t load_data(load_args args);
+list *read_data_cfg(char *filename);
+list *read_cfg(char *filename);
+unsigned char *read_file(char *filename);
+data resize_data(data orig, int w, int h);
+data *tile_data(data orig, int divs, int size);
+data select_data(data *orig, int *inds);
+
+void forward_network(network *net);
+void backward_network(network *net);
+void update_network(network *net);
+
+
+float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
+void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
+void copy_cpu(int N, float *X, int INCX, float *Y, int INCY);
+void scal_cpu(int N, float ALPHA, float *X, int INCX);
+void fill_cpu(int N, float ALPHA, float * X, int INCX);
+void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
+void softmax(float *input, int n, float temp, int stride, float *output);
+
+int best_3d_shift_r(image a, image b, int min, int max);
+
+image get_label(image **characters, char *string, int size);
+void draw_label(image a, int r, int c, image label, const float *rgb);
+void save_image_png(image im, const char *name);
+void get_next_batch(data d, int n, int offset, float *X, float *y);
+void grayscale_image_3c(image im);
+void normalize_image(image p);
+void matrix_to_csv(matrix m);
+float train_network_sgd(network *net, data d, int n);
+void rgbgr_image(image im);
+data copy_data(data d);
+data concat_data(data d1, data d2);
+data load_cifar10_data(char *filename);
+float matrix_topk_accuracy(matrix truth, matrix guess, int k);
+void matrix_add_matrix(matrix from, matrix to);
+void scale_matrix(matrix m, float scale);
+matrix csv_to_matrix(char *filename);
+float *network_accuracies(network *net, data d, int n);
+float train_network_datum(network *net);
+image make_random_image(int w, int h, int c);
+
+void denormalize_connected_layer(layer l);
+void denormalize_convolutional_layer(layer l);
+void statistics_connected_layer(layer l);
+void rescale_weights(layer l, float scale, float trans);
+void rgbgr_weights(layer l);
+image *get_weights(layer l);
+
+void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, int avg, float hier_thresh, int w, int h, int fps, int fullscreen);
+void get_detection_detections(layer l, int w, int h, float thresh, detection *dets);
+
+char *option_find_str(list *l, char *key, char *def);
+int option_find_int(list *l, char *key, int def);
+int option_find_int_quiet(list *l, char *key, int def);
+
+network *parse_network_cfg(char *filename);
+void save_weights(network *net, char *filename);
+void load_weights(network *net, char *filename);
+void save_weights_upto(network *net, char *filename, int cutoff);
+void load_weights_upto(network *net, char *filename, int start, int cutoff);
+
+void zero_objectness(layer l);
+void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets);
+int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets);
+void free_network(network *net);
+void set_batch_network(network *net, int b);
+void set_temp_network(network *net, float t);
+image load_image(char *filename, int w, int h, int c);
+image load_image_color(char *filename, int w, int h);
+image make_image(int w, int h, int c);
+image resize_image(image im, int w, int h);
+void censor_image(image im, int dx, int dy, int w, int h);
+image letterbox_image(image im, int w, int h);
+image crop_image(image im, int dx, int dy, int w, int h);
+image center_crop_image(image im, int w, int h);
+image resize_min(image im, int min);
+image resize_max(image im, int max);
+image threshold_image(image im, float thresh);
+image mask_to_rgb(image mask);
+int resize_network(network *net, int w, int h);
+void free_matrix(matrix m);
+void test_resize(char *filename);
+void save_image(image p, const char *name);
+void show_image(image p, const char *name);
+image copy_image(image p);
+void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b);
+float get_current_rate(network *net);
+void composite_3d(char *f1, char *f2, char *out, int delta);
+data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h);
+size_t get_current_batch(network *net);
+void constrain_image(image im);
+image get_network_image_layer(network *net, int i);
+layer get_network_output_layer(network *net);
+void top_predictions(network *net, int n, int *index);
+void flip_image(image a);
+image float_to_image(int w, int h, int c, float *data);
+void ghost_image(image source, image dest, int dx, int dy);
+float network_accuracy(network *net, data d);
+void random_distort_image(image im, float hue, float saturation, float exposure);
+void fill_image(image m, float s);
+image grayscale_image(image im);
+void rotate_image_cw(image im, int times);
+double what_time_is_it_now();
+image rotate_image(image m, float rad);
+void visualize_network(network *net);
+float box_iou(box a, box b);
+data load_all_cifar10();
+box_label *read_boxes(char *filename, int *n);
+box float_to_box(float *f, int stride);
+void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes);
+
+matrix network_predict_data(network *net, data test);
+image **load_alphabet();
+image get_network_image(network *net);
+float *network_predict(network *net, float *input);
+
+int network_width(network *net);
+int network_height(network *net);
+float *network_predict_image(network *net, image im);
+void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets);
+detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
+void free_detections(detection *dets, int n);
+
+void reset_network_state(network *net, int b);
+
+char **get_labels(char *filename);
+void do_nms_obj(detection *dets, int total, int classes, float thresh);
+void do_nms_sort(detection *dets, int total, int classes, float thresh);
+
+matrix make_matrix(int rows, int cols);
+
+void free_image(image m);
+float train_network(network *net, data d);
+pthread_t load_data_in_thread(load_args args);
+void load_data_blocking(load_args args);
+list *get_paths(char *filename);
+void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride);
+void change_leaves(tree *t, char *leaf_list);
+
+int find_int_arg(int argc, char **argv, char *arg, int def);
+float find_float_arg(int argc, char **argv, char *arg, float def);
+int find_arg(int argc, char* argv[], char *arg);
+char *find_char_arg(int argc, char **argv, char *arg, char *def);
+char *basecfg(char *cfgfile);
+void find_replace(char *str, char *orig, char *rep, char *output);
+void free_ptrs(void **ptrs, int n);
+char *fgetl(FILE *fp);
+void strip(char *s);
+float sec(clock_t clocks);
+void **list_to_array(list *l);
+void top_k(float *a, int n, int k, int *index);
+int *read_map(char *filename);
+void error(const char *s);
+int max_index(float *a, int n);
+int max_int_index(int *a, int n);
+int sample_array(float *a, int n);
+int *random_index_order(int min, int max);
+void free_list(list *l);
+float mse_array(float *a, int n);
+float variance_array(float *a, int n);
+float mag_array(float *a, int n);
+void scale_array(float *a, int n, float s);
+float mean_array(float *a, int n);
+float sum_array(float *a, int n);
+void normalize_array(float *a, int n);
+int *read_intlist(char *s, int *n, int d);
+size_t rand_size_t();
+float rand_normal();
+float rand_uniform(float min, float max);
+
+#ifdef GPU
+void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
+void fill_gpu(int N, float ALPHA, float * X, int INCX);
+void scal_gpu(int N, float ALPHA, float * X, int INCX);
+void copy_gpu(int N, float * X, int INCX, float * Y, int INCY);
+
+void cuda_set_device(int n);
+void cuda_free(float *x_gpu);
+float *cuda_make_array(float *x, size_t n);
+void cuda_pull_array(float *x_gpu, float *x, size_t n);
+float cuda_mag_array(float *x_gpu, size_t n);
+void cuda_push_array(float *x_gpu, float *x, size_t n);
+
+void forward_network_gpu(network *net);
+void backward_network_gpu(network *net);
+void update_network_gpu(network *net);
+
+float train_networks(network **nets, int n, data d, int interval);
+void sync_nets(network **nets, int n, int interval);
+void harmless_update_network_gpu(network *net);
+#endif
+
+#ifndef __cplusplus
+#ifdef OPENCV
+image get_image_from_stream(CvCapture *cap);
+#endif
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/gosdk/sdk/darknet/include/yolo.hpp b/gosdk/sdk/darknet/include/yolo.hpp
new file mode 100644
index 0000000..00c2a5b
--- /dev/null
+++ b/gosdk/sdk/darknet/include/yolo.hpp
@@ -0,0 +1,641 @@
+#pragma once
+#ifdef YOLODLL_EXPORTS
+#if defined(_MSC_VER)
+#define YOLODLL_API __declspec(dllexport) 
+#else
+#define YOLODLL_API __attribute__((visibility("default")))
+#endif
+#else
+#if defined(_MSC_VER)
+#define YOLODLL_API __declspec(dllimport) 
+#else
+#define YOLODLL_API
+#endif
+#endif
+
+struct bbox_t {
+	unsigned int x, y, w, h;	// (x,y) - top-left corner, (w, h) - width & height of bounded box
+	float prob;					// confidence - probability that the object was found correctly
+	unsigned int obj_id;		// class of object - from range [0, classes-1]
+	unsigned int track_id;		// tracking id for video (0 - untracked, 1 - inf - tracked object)
+	unsigned int frames_counter;// counter of frames on which the object was detected
+};
+
+struct image_t {
+	int h;						// height
+	int w;						// width
+	int c;						// number of chanels (3 - for RGB)
+	float *data;				// pointer to the image data
+};
+
+#ifdef __cplusplus
+#include <memory>
+#include <vector>
+#include <deque>
+#include <algorithm>
+
+#ifdef OPENCV
+#include <opencv2/opencv.hpp>			// C++
+#include "opencv2/highgui/highgui_c.h"	// C
+#include "opencv2/imgproc/imgproc_c.h"	// C
+#endif	// OPENCV
+
+class Detector {
+	std::shared_ptr<void> detector_gpu_ptr;
+	std::deque<std::vector<bbox_t>> prev_bbox_vec_deque;
+	const int cur_gpu_id;
+public:
+	float nms = .4;
+	bool wait_stream;
+
+	YOLODLL_API Detector(std::string cfg_filename, std::string weight_filename, int gpu_id = 0);
+	YOLODLL_API ~Detector();
+
+	YOLODLL_API std::vector<bbox_t> detect(std::string image_filename, float thresh = 0.2, bool use_mean = false);
+	YOLODLL_API std::vector<bbox_t> detect(image_t img, float thresh = 0.2, bool use_mean = false);
+	static YOLODLL_API image_t load_image(std::string image_filename);
+	static YOLODLL_API void free_image(image_t m);
+	YOLODLL_API int get_net_width() const;
+	YOLODLL_API int get_net_height() const;
+
+	YOLODLL_API std::vector<bbox_t> tracking_id(std::vector<bbox_t> cur_bbox_vec, bool const change_history = true, 
+												int const frames_story = 10, int const max_dist = 150);
+
+	std::vector<bbox_t> detect_resized(image_t img, int init_w, int init_h, float thresh = 0.2, bool use_mean = false)
+	{
+		if (img.data == NULL)
+			throw std::runtime_error("Image is empty");
+		auto detection_boxes = detect(img, thresh, use_mean);
+		float wk = (float)init_w / img.w, hk = (float)init_h / img.h;
+		for (auto &i : detection_boxes) i.x *= wk, i.w *= wk, i.y *= hk, i.h *= hk;
+		return detection_boxes;
+	}
+
+#ifdef OPENCV
+	std::vector<bbox_t> detect(cv::Mat mat, float thresh = 0.2, bool use_mean = false)
+	{
+		if(mat.data == NULL)
+			throw std::runtime_error("Image is empty");
+		auto image_ptr = mat_to_image_resize(mat);
+		return detect_resized(*image_ptr, mat.cols, mat.rows, thresh, use_mean);
+	}
+
+	std::shared_ptr<image_t> mat_to_image_resize(cv::Mat mat) const
+	{
+		if (mat.data == NULL) return std::shared_ptr<image_t>(NULL);
+		cv::Mat det_mat;
+		cv::resize(mat, det_mat, cv::Size(get_net_width(), get_net_height()));
+		return mat_to_image(det_mat);
+	}
+
+	static std::shared_ptr<image_t> mat_to_image(cv::Mat img_src)
+	{
+		cv::Mat img;
+		cv::cvtColor(img_src, img, cv::COLOR_RGB2BGR);
+		std::shared_ptr<image_t> image_ptr(new image_t, [](image_t *img) { free_image(*img); delete img; });
+		std::shared_ptr<IplImage> ipl_small = std::make_shared<IplImage>(img);
+		*image_ptr = ipl_to_image(ipl_small.get());
+		return image_ptr;
+	}
+
+private:
+
+	static image_t ipl_to_image(IplImage* src)
+	{
+		unsigned char *data = (unsigned char *)src->imageData;
+		int h = src->height;
+		int w = src->width;
+		int c = src->nChannels;
+		int step = src->widthStep;
+		image_t out = make_image_custom(w, h, c);
+		int count = 0;
+
+		for (int k = 0; k < c; ++k) {
+			for (int i = 0; i < h; ++i) {
+				int i_step = i*step;
+				for (int j = 0; j < w; ++j) {
+					out.data[count++] = data[i_step + j*c + k] / 255.;
+				}
+			}
+		}
+
+		return out;
+	}
+
+	static image_t make_empty_image(int w, int h, int c)
+	{
+		image_t out;
+		out.data = 0;
+		out.h = h;
+		out.w = w;
+		out.c = c;
+		return out;
+	}
+
+	static image_t make_image_custom(int w, int h, int c)
+	{
+		image_t out = make_empty_image(w, h, c);
+		out.data = (float *)calloc(h*w*c, sizeof(float));
+		return out;
+	}
+
+#endif	// OPENCV
+
+};
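
For reference, the Detector class above is normally driven like this; a minimal sketch, assuming the translation unit is built with -DOPENCV and that "yolov3.cfg", "yolov3.weights" and "test.jpg" are placeholders for whatever model and test image this SDK actually ships with:

#include <iostream>
#include <opencv2/opencv.hpp>
#include "yolo.hpp"

int main() {
	Detector det("yolov3.cfg", "yolov3.weights", 0 /* gpu_id */);
	cv::Mat frame = cv::imread("test.jpg");
	// detect() returns one bbox_t per object above the confidence threshold
	std::vector<bbox_t> boxes = det.detect(frame, 0.2f /* thresh */);
	for (const auto &b : boxes)
		std::cout << "obj_id=" << b.obj_id << " prob=" << b.prob
		          << " box=(" << b.x << "," << b.y << " " << b.w << "x" << b.h << ")\n";
	return 0;
}
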
+
+
+
+#if defined(TRACK_OPTFLOW) && defined(OPENCV) && defined(GPU)
+
+#include <opencv2/cudaoptflow.hpp>
+#include <opencv2/cudaimgproc.hpp>
+#include <opencv2/cudaarithm.hpp>
+#include <opencv2/core/cuda.hpp>
+
+class Tracker_optflow {
+public:
+	const int gpu_count;
+	const int gpu_id;
+	const int flow_error;
+
+
+	Tracker_optflow(int _gpu_id = 0, int win_size = 9, int max_level = 3, int iterations = 8000, int _flow_error = -1) :
+		gpu_count(cv::cuda::getCudaEnabledDeviceCount()), gpu_id(std::min(_gpu_id, gpu_count-1)),
+		flow_error((_flow_error > 0)? _flow_error:(win_size*4))
+	{
+		int const old_gpu_id = cv::cuda::getDevice();
+		cv::cuda::setDevice(gpu_id);
+
+		stream = cv::cuda::Stream();
+
+		sync_PyrLKOpticalFlow_gpu = cv::cuda::SparsePyrLKOpticalFlow::create();
+		sync_PyrLKOpticalFlow_gpu->setWinSize(cv::Size(win_size, win_size));	// 9, 15, 21, 31
+		sync_PyrLKOpticalFlow_gpu->setMaxLevel(max_level);		// +- 3 pt
+		sync_PyrLKOpticalFlow_gpu->setNumIters(iterations);	// 2000, def: 30
+
+		cv::cuda::setDevice(old_gpu_id);
+	}
+
+	// just to avoid extra allocations
+	cv::cuda::GpuMat src_mat_gpu;
+	cv::cuda::GpuMat dst_mat_gpu, dst_grey_gpu;
+	cv::cuda::GpuMat prev_pts_flow_gpu, cur_pts_flow_gpu;
+	cv::cuda::GpuMat status_gpu, err_gpu;
+
+	cv::cuda::GpuMat src_grey_gpu;	// used in both functions
+	cv::Ptr<cv::cuda::SparsePyrLKOpticalFlow> sync_PyrLKOpticalFlow_gpu;
+	cv::cuda::Stream stream;
+
+	std::vector<bbox_t> cur_bbox_vec;
+	std::vector<bool> good_bbox_vec_flags;
+	cv::Mat prev_pts_flow_cpu;
+
+	void update_cur_bbox_vec(std::vector<bbox_t> _cur_bbox_vec)
+	{
+		cur_bbox_vec = _cur_bbox_vec;
+		good_bbox_vec_flags = std::vector<bool>(cur_bbox_vec.size(), true);
+		cv::Mat prev_pts, cur_pts_flow_cpu;
+
+		for (auto &i : cur_bbox_vec) {
+			float x_center = (i.x + i.w / 2.0F);
+			float y_center = (i.y + i.h / 2.0F);
+			prev_pts.push_back(cv::Point2f(x_center, y_center));
+		}
+
+		if (prev_pts.rows == 0)
+			prev_pts_flow_cpu = cv::Mat();
+		else
+			cv::transpose(prev_pts, prev_pts_flow_cpu);
+
+		if (prev_pts_flow_gpu.cols < prev_pts_flow_cpu.cols) {
+			prev_pts_flow_gpu = cv::cuda::GpuMat(prev_pts_flow_cpu.size(), prev_pts_flow_cpu.type());
+			cur_pts_flow_gpu = cv::cuda::GpuMat(prev_pts_flow_cpu.size(), prev_pts_flow_cpu.type());
+
+			status_gpu = cv::cuda::GpuMat(prev_pts_flow_cpu.size(), CV_8UC1);
+			err_gpu = cv::cuda::GpuMat(prev_pts_flow_cpu.size(), CV_32FC1);
+		}
+
+		prev_pts_flow_gpu.upload(cv::Mat(prev_pts_flow_cpu), stream);
+	}
+
+
+	void update_tracking_flow(cv::Mat src_mat, std::vector<bbox_t> _cur_bbox_vec)
+	{
+		int const old_gpu_id = cv::cuda::getDevice();
+		if (old_gpu_id != gpu_id)
+			cv::cuda::setDevice(gpu_id);
+
+		if (src_mat.channels() == 3) {
+			if (src_mat_gpu.cols == 0) {
+				src_mat_gpu = cv::cuda::GpuMat(src_mat.size(), src_mat.type());
+				src_grey_gpu = cv::cuda::GpuMat(src_mat.size(), CV_8UC1);
+			}
+
+			update_cur_bbox_vec(_cur_bbox_vec);
+
+			//src_grey_gpu.upload(src_mat, stream);	// use BGR
+			src_mat_gpu.upload(src_mat, stream);
+			cv::cuda::cvtColor(src_mat_gpu, src_grey_gpu, CV_BGR2GRAY, 1, stream);
+		}
+		if (old_gpu_id != gpu_id)
+			cv::cuda::setDevice(old_gpu_id);
+	}
+
+
+	std::vector<bbox_t> tracking_flow(cv::Mat dst_mat, bool check_error = true)
+	{
+		if (sync_PyrLKOpticalFlow_gpu.empty()) {
+			std::cout << "sync_PyrLKOpticalFlow_gpu isn't initialized \n";
+			return cur_bbox_vec;
+		}
+
+		int const old_gpu_id = cv::cuda::getDevice();
+		if(old_gpu_id != gpu_id)
+			cv::cuda::setDevice(gpu_id);
+
+		if (dst_mat_gpu.cols == 0) {
+			dst_mat_gpu = cv::cuda::GpuMat(dst_mat.size(), dst_mat.type());
+			dst_grey_gpu = cv::cuda::GpuMat(dst_mat.size(), CV_8UC1);
+		}
+
+		//dst_grey_gpu.upload(dst_mat, stream);	// use BGR
+		dst_mat_gpu.upload(dst_mat, stream);
+		cv::cuda::cvtColor(dst_mat_gpu, dst_grey_gpu, CV_BGR2GRAY, 1, stream);
+
+		if (src_grey_gpu.rows != dst_grey_gpu.rows || src_grey_gpu.cols != dst_grey_gpu.cols) {
+			stream.waitForCompletion();
+			src_grey_gpu = dst_grey_gpu.clone();
+			cv::cuda::setDevice(old_gpu_id);
+			return cur_bbox_vec;
+		}
+
+		////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);	// OpenCV 2.4.x
+		sync_PyrLKOpticalFlow_gpu->calc(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, err_gpu, stream);	// OpenCV 3.x
+
+		cv::Mat cur_pts_flow_cpu;
+		cur_pts_flow_gpu.download(cur_pts_flow_cpu, stream);
+
+		dst_grey_gpu.copyTo(src_grey_gpu, stream);
+
+		cv::Mat err_cpu, status_cpu;
+		err_gpu.download(err_cpu, stream);
+		status_gpu.download(status_cpu, stream);
+
+		stream.waitForCompletion();
+
+		std::vector<bbox_t> result_bbox_vec;
+
+		if (err_cpu.cols == cur_bbox_vec.size() && status_cpu.cols == cur_bbox_vec.size()) 
+		{
+			for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
+			{
+				cv::Point2f cur_key_pt = cur_pts_flow_cpu.at<cv::Point2f>(0, i);
+				cv::Point2f prev_key_pt = prev_pts_flow_cpu.at<cv::Point2f>(0, i);
+
+				float moved_x = cur_key_pt.x - prev_key_pt.x;
+				float moved_y = cur_key_pt.y - prev_key_pt.y;
+
+				if (abs(moved_x) < 100 && abs(moved_y) < 100 && good_bbox_vec_flags[i])
+					if (err_cpu.at<float>(0, i) < flow_error && status_cpu.at<unsigned char>(0, i) != 0 &&
+						((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
+					{
+						cur_bbox_vec[i].x += moved_x + 0.5;
+						cur_bbox_vec[i].y += moved_y + 0.5;
+						result_bbox_vec.push_back(cur_bbox_vec[i]);
+					}
+					else good_bbox_vec_flags[i] = false;
+				else good_bbox_vec_flags[i] = false;
+
+				//if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
+			}
+		}
+
+		cur_pts_flow_gpu.swap(prev_pts_flow_gpu);
+		cur_pts_flow_cpu.copyTo(prev_pts_flow_cpu);
+
+		if (old_gpu_id != gpu_id)
+			cv::cuda::setDevice(old_gpu_id);
+
+		return result_bbox_vec;
+	}
+
+};
+
+#elif defined(TRACK_OPTFLOW) && defined(OPENCV)
+
+//#include <opencv2/optflow.hpp>
+#include <opencv2/video/tracking.hpp>
+
+class Tracker_optflow {
+public:
+	const int flow_error;
+
+
+	Tracker_optflow(int win_size = 9, int max_level = 3, int iterations = 8000, int _flow_error = -1) :
+		flow_error((_flow_error > 0)? _flow_error:(win_size*4))
+	{
+		sync_PyrLKOpticalFlow = cv::SparsePyrLKOpticalFlow::create();
+		sync_PyrLKOpticalFlow->setWinSize(cv::Size(win_size, win_size));	// 9, 15, 21, 31
+		sync_PyrLKOpticalFlow->setMaxLevel(max_level);		// +- 3 pt
+
+	}
+
+	// just to avoid extra allocations
+	cv::Mat dst_grey;
+	cv::Mat prev_pts_flow, cur_pts_flow;
+	cv::Mat status, err;
+
+	cv::Mat src_grey;	// used in both functions
+	cv::Ptr<cv::SparsePyrLKOpticalFlow> sync_PyrLKOpticalFlow;
+
+	std::vector<bbox_t> cur_bbox_vec;
+	std::vector<bool> good_bbox_vec_flags;
+
+	void update_cur_bbox_vec(std::vector<bbox_t> _cur_bbox_vec)
+	{
+		cur_bbox_vec = _cur_bbox_vec;
+		good_bbox_vec_flags = std::vector<bool>(cur_bbox_vec.size(), true);
+		cv::Mat prev_pts, cur_pts_flow;
+
+		for (auto &i : cur_bbox_vec) {
+			float x_center = (i.x + i.w / 2.0F);
+			float y_center = (i.y + i.h / 2.0F);
+			prev_pts.push_back(cv::Point2f(x_center, y_center));
+		}
+
+		if (prev_pts.rows == 0)
+			prev_pts_flow = cv::Mat();
+		else
+			cv::transpose(prev_pts, prev_pts_flow);
+	}
+
+
+	void update_tracking_flow(cv::Mat new_src_mat, std::vector<bbox_t> _cur_bbox_vec)
+	{
+		if (new_src_mat.channels() == 3) {
+
+			update_cur_bbox_vec(_cur_bbox_vec);
+
+			cv::cvtColor(new_src_mat, src_grey, CV_BGR2GRAY, 1);
+		}
+	}
+
+
+	std::vector<bbox_t> tracking_flow(cv::Mat new_dst_mat, bool check_error = true)
+	{
+		if (sync_PyrLKOpticalFlow.empty()) {
+			std::cout << "sync_PyrLKOpticalFlow isn't initialized \n";
+			return cur_bbox_vec;
+		}
+
+		cv::cvtColor(new_dst_mat, dst_grey, CV_BGR2GRAY, 1);
+
+		if (src_grey.rows != dst_grey.rows || src_grey.cols != dst_grey.cols) {
+			src_grey = dst_grey.clone();
+			return cur_bbox_vec;
+		}
+
+		if (prev_pts_flow.cols < 1) {
+			return cur_bbox_vec;
+		}
+
+		////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);	// OpenCV 2.4.x
+		sync_PyrLKOpticalFlow->calc(src_grey, dst_grey, prev_pts_flow, cur_pts_flow, status, err);	// OpenCV 3.x
+
+		dst_grey.copyTo(src_grey);
+
+		std::vector<bbox_t> result_bbox_vec;
+
+		if (err.rows == cur_bbox_vec.size() && status.rows == cur_bbox_vec.size())
+		{
+			for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
+			{
+				cv::Point2f cur_key_pt = cur_pts_flow.at<cv::Point2f>(0, i);
+				cv::Point2f prev_key_pt = prev_pts_flow.at<cv::Point2f>(0, i);
+
+				float moved_x = cur_key_pt.x - prev_key_pt.x;
+				float moved_y = cur_key_pt.y - prev_key_pt.y;
+
+				if (abs(moved_x) < 100 && abs(moved_y) < 100 && good_bbox_vec_flags[i])
+					if (err.at<float>(0, i) < flow_error && status.at<unsigned char>(0, i) != 0 &&
+						((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
+					{
+						cur_bbox_vec[i].x += moved_x + 0.5;
+						cur_bbox_vec[i].y += moved_y + 0.5;
+						result_bbox_vec.push_back(cur_bbox_vec[i]);
+					}
+					else good_bbox_vec_flags[i] = false;
+				else good_bbox_vec_flags[i] = false;
+
+				//if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
+			}
+		}
+
+		prev_pts_flow = cur_pts_flow.clone();
+
+		return result_bbox_vec;
+	}
+
+};
+#else
+
+class Tracker_optflow {};
+
+#endif	// defined(TRACK_OPTFLOW) && defined(OPENCV)
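
Both Tracker_optflow variants above expose the same pair of calls: update_tracking_flow() remembers the frame the current detections came from, and tracking_flow() shifts those boxes along the sparse Lucas-Kanade flow computed against a later frame. A minimal sketch of the intended interleaving (detect on key frames, track in between), assuming TRACK_OPTFLOW and OPENCV are defined and that the model paths and "video.mp4" are placeholders:

#include <opencv2/opencv.hpp>
#include "yolo.hpp"

int main() {
	Detector det("yolov3.cfg", "yolov3.weights");
	Tracker_optflow tracker;
	cv::VideoCapture cap("video.mp4");
	cv::Mat frame;
	std::vector<bbox_t> boxes;
	for (int n = 0; cap.read(frame); ++n) {
		if (n % 5 == 0) {                                // full detection every 5th frame
			boxes = det.detect(frame, 0.2f);
			tracker.update_tracking_flow(frame, boxes);  // remember source frame and boxes
		} else {
			boxes = tracker.tracking_flow(frame);        // propagate boxes by optical flow
		}
	}
	return 0;
}
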
+
+
+#ifdef OPENCV
+
+static cv::Scalar obj_id_to_color(int obj_id) {
+	int const colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } };
+	int const offset = obj_id * 123457 % 6;
+	int const color_scale = 150 + (obj_id * 123457) % 100;
+	cv::Scalar color(colors[offset][0], colors[offset][1], colors[offset][2]);
+	color *= color_scale;
+	return color;
+}
+
+class preview_boxes_t {
+	enum { frames_history = 30 };	// how long to keep the history saved
+
+	struct preview_box_track_t {
+		unsigned int track_id, obj_id, last_showed_frames_ago;
+		bool current_detection;
+		bbox_t bbox;
+		cv::Mat mat_obj, mat_resized_obj;
+		preview_box_track_t() : track_id(0), obj_id(0), last_showed_frames_ago(frames_history), current_detection(false) {}
+	};
+	std::vector<preview_box_track_t> preview_box_track_id;
+	size_t const preview_box_size, bottom_offset;
+	bool const one_off_detections;
+public:
+	preview_boxes_t(size_t _preview_box_size = 100, size_t _bottom_offset = 100, bool _one_off_detections = false) :
+		preview_box_size(_preview_box_size), bottom_offset(_bottom_offset), one_off_detections(_one_off_detections)
+	{}
+
+	void set(cv::Mat src_mat, std::vector<bbox_t> result_vec)
+	{
+		size_t const count_preview_boxes = src_mat.cols / preview_box_size;
+		if (preview_box_track_id.size() != count_preview_boxes) preview_box_track_id.resize(count_preview_boxes);
+
+		// increment frames history
+		for (auto &i : preview_box_track_id)
+			i.last_showed_frames_ago = std::min((unsigned)frames_history, i.last_showed_frames_ago + 1);
+
+		// occupy empty boxes
+		for (auto &k : result_vec) {
+			bool found = false;
+			// find the same (track_id)
+			for (auto &i : preview_box_track_id) {
+				if (i.track_id == k.track_id) {
+					if (!one_off_detections) i.last_showed_frames_ago = 0; // for tracked objects
+					found = true;
+					break;
+				}
+			}
+			if (!found) {
+				// find empty box
+				for (auto &i : preview_box_track_id) {
+					if (i.last_showed_frames_ago == frames_history) {
+						if (!one_off_detections && k.frames_counter == 0) break; // don't show if obj isn't tracked yet
+						i.track_id = k.track_id;
+						i.obj_id = k.obj_id;
+						i.bbox = k;
+						i.last_showed_frames_ago = 0;
+						break;
+					}
+				}
+			}
+		}
+
+		// draw preview box (from old or current frame)
+		for (size_t i = 0; i < preview_box_track_id.size(); ++i)
+		{
+			// get object image
+			cv::Mat dst = preview_box_track_id[i].mat_resized_obj;
+			preview_box_track_id[i].current_detection = false;
+
+			for (auto &k : result_vec) {
+				if (preview_box_track_id[i].track_id == k.track_id) {
+					if (one_off_detections && preview_box_track_id[i].last_showed_frames_ago > 0) {
+						preview_box_track_id[i].last_showed_frames_ago = frames_history; break;
+					}
+					bbox_t b = k;
+					cv::Rect r(b.x, b.y, b.w, b.h);
+					cv::Rect img_rect(cv::Point2i(0, 0), src_mat.size());
+					cv::Rect rect_roi = r & img_rect;
+					if (rect_roi.width > 1 || rect_roi.height > 1) {
+						cv::Mat roi = src_mat(rect_roi);
+						cv::resize(roi, dst, cv::Size(preview_box_size, preview_box_size), cv::INTER_NEAREST);
+						preview_box_track_id[i].mat_obj = roi.clone();
+						preview_box_track_id[i].mat_resized_obj = dst.clone();
+						preview_box_track_id[i].current_detection = true;
+						preview_box_track_id[i].bbox = k;
+					}
+					break;
+				}
+			}
+		}
+	}
+
+
+	void draw(cv::Mat draw_mat, bool show_small_boxes = false)
+	{
+		// draw preview box (from old or current frame)
+		for (size_t i = 0; i < preview_box_track_id.size(); ++i)
+		{
+			auto &prev_box = preview_box_track_id[i];
+
+			// draw object image
+			cv::Mat dst = prev_box.mat_resized_obj;
+			if (prev_box.last_showed_frames_ago < frames_history &&
+				dst.size() == cv::Size(preview_box_size, preview_box_size))
+			{
+				cv::Rect dst_rect_roi(cv::Point2i(i * preview_box_size, draw_mat.rows - bottom_offset), dst.size());
+				cv::Mat dst_roi = draw_mat(dst_rect_roi);
+				dst.copyTo(dst_roi);
+
+				cv::Scalar color = obj_id_to_color(prev_box.obj_id);
+				int thickness = (prev_box.current_detection) ? 5 : 1;
+				cv::rectangle(draw_mat, dst_rect_roi, color, thickness);
+
+				unsigned int const track_id = prev_box.track_id;
+				std::string track_id_str = (track_id > 0) ? std::to_string(track_id) : "";
+				putText(draw_mat, track_id_str, dst_rect_roi.tl() - cv::Point2i(-4, 5), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.9, cv::Scalar(0, 0, 0), 2);
+
+				std::string size_str = std::to_string(prev_box.bbox.w) + "x" + std::to_string(prev_box.bbox.h);
+				putText(draw_mat, size_str, dst_rect_roi.tl() + cv::Point2i(0, 12), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
+
+				if (!one_off_detections && prev_box.current_detection) {
+					cv::line(draw_mat, dst_rect_roi.tl() + cv::Point2i(preview_box_size, 0),
+						cv::Point2i(prev_box.bbox.x, prev_box.bbox.y + prev_box.bbox.h),
+						color);
+				}
+
+				if (one_off_detections && show_small_boxes) {
+					cv::Rect src_rect_roi(cv::Point2i(prev_box.bbox.x, prev_box.bbox.y),
+						cv::Size(prev_box.bbox.w, prev_box.bbox.h));
+					unsigned int const color_history = (255 * prev_box.last_showed_frames_ago) / frames_history;
+					color = cv::Scalar(255 - 3 * color_history, 255 - 2 * color_history, 255 - 1 * color_history);
+					if (prev_box.mat_obj.size() == src_rect_roi.size()) {
+						prev_box.mat_obj.copyTo(draw_mat(src_rect_roi));
+					}
+					cv::rectangle(draw_mat, src_rect_roi, color, thickness);
+					putText(draw_mat, track_id_str, src_rect_roi.tl() - cv::Point2i(0, 10), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
+				}
+			}
+		}
+	}
+};
+#endif	// OPENCV
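
preview_boxes_t above maintains a strip of per-track thumbnails along the bottom of the output frame: set() crops the current detections into that strip and draw() paints it, together with track-id and size labels. A minimal sketch of the display loop it is designed for, assuming OPENCV is defined and that camera index 0 and the model paths are placeholders:

#include <opencv2/opencv.hpp>
#include "yolo.hpp"

int main() {
	Detector det("yolov3.cfg", "yolov3.weights");
	preview_boxes_t preview(100 /* thumbnail size */, 100 /* bottom offset */);
	cv::VideoCapture cap(0);
	cv::Mat frame;
	while (cap.read(frame)) {
		auto boxes = det.tracking_id(det.detect(frame, 0.2f));  // attach track_id to detections
		preview.set(frame, boxes);      // refresh thumbnails for currently tracked objects
		preview.draw(frame);            // render thumbnails, borders and labels onto the frame
		cv::imshow("preview", frame);
		if (cv::waitKey(1) == 27) break;  // Esc quits
	}
	return 0;
}
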
+
+//extern "C" {
+#endif	// __cplusplus
+
+/*
+	// C - wrappers
+	YOLODLL_API void create_detector(char const* cfg_filename, char const* weight_filename, int gpu_id);
+	YOLODLL_API void delete_detector();
+	YOLODLL_API bbox_t* detect_custom(image_t img, float thresh, bool use_mean, int *result_size);
+	YOLODLL_API bbox_t* detect_resized(image_t img, int init_w, int init_h, float thresh, bool use_mean, int *result_size);
+	YOLODLL_API bbox_t* detect(image_t img, int *result_size);
+	YOLODLL_API image_t load_img(char *image_filename);
+	YOLODLL_API void free_img(image_t m);
+
+#ifdef __cplusplus
+}	// extern "C"
+
+static std::shared_ptr<void> c_detector_ptr;
+static std::vector<bbox_t> c_result_vec;
+
+void create_detector(char const* cfg_filename, char const* weight_filename, int gpu_id) {
+	c_detector_ptr = std::make_shared<YOLODLL_API Detector>(cfg_filename, weight_filename, gpu_id);
+}
+
+void delete_detector() { c_detector_ptr.reset(); }
+
+bbox_t* detect_custom(image_t img, float thresh, bool use_mean, int *result_size) {
+	c_result_vec = static_cast<Detector*>(c_detector_ptr.get())->detect(img, thresh, use_mean);
+	*result_size = c_result_vec.size();
+	return c_result_vec.data();
+}
+
+bbox_t* detect_resized(image_t img, int init_w, int init_h, float thresh, bool use_mean, int *result_size) {
+	c_result_vec = static_cast<Detector*>(c_detector_ptr.get())->detect_resized(img, init_w, init_h, thresh, use_mean);
+	*result_size = c_result_vec.size();
+	return c_result_vec.data();
+}
+
+bbox_t* detect(image_t img, int *result_size) {
+	return detect_custom(img, 0.24, true, result_size);
+}
+
+image_t load_img(char *image_filename) {
+	return static_cast<Detector*>(c_detector_ptr.get())->load_image(image_filename);
+}
+void free_img(image_t m) {
+	static_cast<Detector*>(c_detector_ptr.get())->free_image(m);
+}
+
+#endif	// __cplusplus
+*/
diff --git a/gosdk/sdk/face/include/FiStdDefEx.h b/gosdk/sdk/face/include/FiStdDefEx.h
new file mode 100644
index 0000000..222b9d0
--- /dev/null
+++ b/gosdk/sdk/face/include/FiStdDefEx.h
@@ -0,0 +1,33 @@
+#ifndef _FI_STD_DEF_EX_H_
+#define _FI_STD_DEF_EX_H_
+
+#ifndef WIN32
+
+typedef struct tagPOINT
+{
+	int x, y;
+}POINT;
+
+typedef struct tagSIZE
+{
+	int cx, cy;
+}SIZE;
+
+typedef struct tagRECT
+{
+	int left, top, right, bottom;
+}RECT;
+
+typedef unsigned char BYTE;
+typedef unsigned short WORD;
+typedef unsigned int DWORD;
+
+#endif
+
+/*
+typedef struct tagPointF {
+	float x;
+	float y;
+} TPointF;
+*/
+#endif // _FI_STD_DEF_EX_H_
diff --git a/gosdk/sdk/face/include/THFaceImage_i.h b/gosdk/sdk/face/include/THFaceImage_i.h
new file mode 100644
index 0000000..7910496
--- /dev/null
+++ b/gosdk/sdk/face/include/THFaceImage_i.h
@@ -0,0 +1,174 @@
+#ifndef THFACEIMAGE_I_H
+#define THFACEIMAGE_I_H
+
+#include "FiStdDefEx.h"
+
+/*
+* ============================================================================
+*  Name     : THFaceImage_i.h
+*  Part of  : Face Recognition (THFaceImage) SDK
+*  Created  : 9.1.2016 by XXX
+*  Description:
+*     THFaceImage_i.h -  Face Recognition (THFaceImage) SDK header file
+*  Version  : 4.0.0
+*  Copyright: All Rights Reserved by XXXX
+*  Revision:
+* ============================================================================
+*/
+
+#define THFACEIMAGE_API extern "C"
+
+//////Struct define//////
+
+struct FaceAngle
+{
+	int   yaw;//angle of yaw,from -90 to +90,left is negative,right is positive
+	int   pitch;//angle of pitch,from -90 to +90,up is negative,down is positive
+	int   roll;//angle of roll,from -90 to +90,left is negative,right is positive
+	float confidence;//confidence of face pose(from 0 to 1,0.6 is suggested threshold)
+};
+
+struct THFI_FacePos
+{
+    RECT		rcFace;//coordinate of face
+   	POINT		ptLeftEye;//coordinate of left eye
+	POINT		ptRightEye;//coordinate of right eye
+	POINT		ptMouth;//coordinate of mouth
+	POINT		ptNose;//coordinate of nose								
+	FaceAngle	fAngle;//value of face angle
+	int			nQuality;//quality of face(from 0 to 100)
+	BYTE   		pFacialData[512];//facial data
+	THFI_FacePos()
+	{
+		memset(&rcFace,0,sizeof(RECT));
+		memset(&ptLeftEye,0,sizeof(POINT));
+		memset(&ptRightEye,0,sizeof(POINT));
+		memset(&ptMouth,0,sizeof(POINT));
+		memset(&ptNose,0,sizeof(POINT));
+		memset(&fAngle,0,sizeof(FaceAngle));
+		nQuality=0;
+		memset(pFacialData, 0, 512);
+	}
+};
+
+typedef long long DWORD_PTR;
+struct THFI_Param
+{
+	int nMinFaceSize;//minimum face width that can be detected,default is 50 pixels
+	int nRollAngle;//max face roll angle,default is 30 (degrees)
+	bool bOnlyDetect;//ignored
+	DWORD_PTR dwReserved;//reserved value,must be NULL
+	THFI_Param()
+	{
+		nMinFaceSize=50;
+		nRollAngle=30;
+		bOnlyDetect=false;
+		dwReserved=NULL;
+	}
+};
+
+struct THFI_Param_Ex
+{
+	THFI_Param tp;
+	int nDeviceID;//device id for GPU device.eg:0,1,2,3.....
+	THFI_Param_Ex()
+	{
+		nDeviceID = 0;
+	}
+};
+
+//////API define//////
+
+THFACEIMAGE_API int		THFI_Create(short nChannelNum,THFI_Param* pParam);
+/*
+ The THFI_Create function will initialize the algorithm engine module
+
+ Parameters:
+	nChannelNum[input],algorithm channel num,for multi-thread mode,one thread uses one channel
+	pParam[input],algorithm engine parameter.
+ Return Values:
+	If the function succeeds, the return value is valid channel number.
+	If the function fails, the return value is zero or negative;
+	error code:
+		-99,invalid license.
+ Remarks: 
+	This function only can be called one time at program initialization.
+*/
+
+THFACEIMAGE_API int		THFI_DetectFace(short nChannelID, BYTE* pImage, int bpp, int nWidth, int nHeight, THFI_FacePos* pfps, int nMaxFaceNums, int nSampleSize=640);
+/*
+ The THFI_DetectFace function executes face detection only.
+
+ Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pImage[input],image data buffer,RGB24 format.
+	bpp[input],bits per pixel(24-RGB24 image),must be 24
+	nWidth[input],image width.
+	nHeight[input],image height.
+	pfps[output],the facial position information.
+	nMaxFaceNums[input],max face nums that you want
+	nSampleSize[input],down sample size(image down sample) for detect image,if it is 0,will detect by original image.
+ Return Values:
+	If the function succeeds, the return value is face number.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,nChannelID is invalid or SDK is not initialized
+		-2,image data is invalid,please check function parameter:pImage,bpp,nWidth,nHeight
+		-3,pfps or nMaxFaceNums is invalid.
+ Remarks:
+	1.image data buffer(pImage)	size must be nWidth*(bpp/8)*nHeight.
+	2.pfps must be allocated by caller,the memory size is nMaxFaceNums*sizeof(THFI_FacePos).
+	3.if image has face(s),face number less than or equal to nMaxFaceNums
+*/
+
+THFACEIMAGE_API int THFI_DetectFaceByEye(short nChannelID, BYTE* pImage, int nWidth, int nHeight, POINT ptLeft, POINT ptRight, THFI_FacePos* pfps);
+/*
+The THFI_DetectFaceByEye function detects facial data by eye position
+
+Parameters:
+	pImage[input],image data buffer,rgb24 format,pImage data size must be nWidth*nHeight*3 bytes
+	nWidth[input],image width.
+	nHeight[input],image height.
+	ptLeft[input],left eye position
+	ptRight[input],right eye position
+	pfps[output],the facial position information.
+Return Values:
+	If the function succeeds, the return value is 1.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,nChannelID is invalid or SDK is not initialized
+		-2,image data is invalid,please check function parameter:pImage,bpp,nWidth,nHeight
+		-3,pfps or nMaxFaceNums is invalid.
+*/
+
+THFACEIMAGE_API void	THFI_Release();
+/*
+ The THFI_Release function will release the algorithm engine module
+
+ Parameters:
+	No parameter.
+ Return Values:
+	No return value.
+ Remarks:
+	This function only can be called one time at program exit.
+*/
+
+THFACEIMAGE_API int		THFI_Create_Ex(short nChannelNum, THFI_Param_Ex* pParam);
+/*
+The THFI_Create_Ex function will initialize the algorithm engine module,only for GPU version
+
+Parameters:
+nChannelNum[input],algorithm channel num,for multi-thread mode,one thread uses one channel
+pParam[input],algorithm engine parameter.
+Return Values:
+If the function succeeds, the return value is valid channel number.
+If the function fails, the return value is zero or negative;
+error code:
+-99,invalid license.
+Remarks:
+This function only can be called one time at program initialization.
+*/
+
+#endif
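
Putting the API above together: one channel is created at startup, THFI_DetectFace fills a caller-allocated THFI_FacePos array, and THFI_Release runs at exit. A minimal sketch that uses OpenCV only to load a placeholder image ("face.jpg"); note that cv::imread returns BGR data, so whether a channel swap to the RGB24 ordering mentioned for pImage is needed should be confirmed against the SDK samples:

#include <cstdio>
#include <opencv2/opencv.hpp>
#include "THFaceImage_i.h"

int main() {
	THFI_Param param;                       // defaults: 50 px min face, 30 degree roll
	if (THFI_Create(1 /* channels */, &param) <= 0) {
		printf("THFI_Create failed\n");
		return 1;
	}
	cv::Mat img = cv::imread("face.jpg");   // 8-bit, 3-channel image
	THFI_FacePos faces[32];                 // caller-allocated, nMaxFaceNums entries
	int n = THFI_DetectFace(0 /* channel */, img.data, 24 /* bpp */,
	                        img.cols, img.rows, faces, 32);
	for (int i = 0; i < n; ++i)
		printf("face %d: rect=(%d,%d,%d,%d) quality=%d\n", i,
		       faces[i].rcFace.left, faces[i].rcFace.top,
		       faces[i].rcFace.right, faces[i].rcFace.bottom, faces[i].nQuality);
	THFI_Release();
	return 0;
}
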
diff --git a/gosdk/sdk/face/include/THFaceLive_i.h b/gosdk/sdk/face/include/THFaceLive_i.h
new file mode 100644
index 0000000..a0c5574
--- /dev/null
+++ b/gosdk/sdk/face/include/THFaceLive_i.h
@@ -0,0 +1,62 @@
+#ifndef THFACELIVE_I_H
+#define THFACELIVE_I_H
+
+/*
+* ============================================================================
+*  Name     : THFaceLive_i.h
+*  Part of  : Face Liveness Detect (THFaceLive) SDK
+*  Created  : 9.1.2017 by XXX
+*  Description:
+*     THFaceLive_i.h -  Face Liveness Detect (THFaceLive) SDK header file
+*  Version  : 2.0.0
+*  Copyright: All Rights Reserved by XXXX
+*  Revision:
+* ============================================================================
+*/
+#include "THFaceImage_i.h"
+
+#define THFACELIVE_API extern "C"
+
+THFACELIVE_API int	THFL_Create();
+/*
+The THFL_Create function will initialize the algorithm engine module
+
+Parameters:
+	No parameter.
+Return Values:
+	If the function succeeds, the return value is 1.
+	If the function fails, the return value is negative;
+Remarks:
+	This function only can be called one time at program initialization.
+*/
+
+THFACELIVE_API int	THFL_Detect(unsigned char* pBuf_color, unsigned char* pBuf_bw, int nWidth, int nHeight, THFI_FacePos*  ptfp_color, THFI_FacePos* ptfp_bw, int nThreshold=30);
+/*
+The THFL_Detect function executes face liveness detection
+
+Parameters:
+	pBuf_color[input],color camera image data buffer,bgr format.
+	pBuf_bw[input],black-white camera image data buffer,bgr format.
+	nWidth[input],image width.
+	nHeight[input],image height.
+	ptfp_color[input],face data of color camera image.(THFI_FacePos format,return by THFI_DetectFace of THFaceImage SDK)
+	ptfp_bw[input],face data of black-white camera image.(THFI_FacePos format,return by THFI_DetectFace of THFaceImage SDK)
+	nThreshold[input],score threshold(suggested value is 30)
+Return Values:
+	If the function succeeds, the return value is 0 or 1.(0->fake face,1->live face)
+	If the function fails, the return value is negative.
+Remarks:
+*/
+THFACELIVE_API void	THFL_Release();
+/*
+The THFL_Release function will release the algorithm engine module
+
+Parameters:
+	No parameter.
+Return Values:
+	No return value.
+Remarks:
+	This function only can be called one time at program exit.
+*/
+
+#endif
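
The liveness check takes a frame pair (color camera plus black-white camera) and the THFI_FacePos detected in each frame by THFaceImage. A minimal sketch of that wiring, assuming THFI_Create and THFL_Create were already called and that the two BGR buffers come from the dual-camera capture code (not shown here):

#include "THFaceImage_i.h"
#include "THFaceLive_i.h"

// Returns 1 for a live face, 0 for a fake, negative when detection or the SDK call fails.
int check_liveness(unsigned char *bgr_color, unsigned char *bgr_bw, int w, int h) {
	THFI_FacePos fp_color, fp_bw;
	if (THFI_DetectFace(0, bgr_color, 24, w, h, &fp_color, 1) < 1) return -1;
	if (THFI_DetectFace(0, bgr_bw, 24, w, h, &fp_bw, 1) < 1) return -1;
	return THFL_Detect(bgr_color, bgr_bw, w, h, &fp_color, &fp_bw, 30 /* threshold */);
}
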
diff --git a/gosdk/sdk/face/include/THFaceProperty_i.h b/gosdk/sdk/face/include/THFaceProperty_i.h
new file mode 100644
index 0000000..4f5aaa6
--- /dev/null
+++ b/gosdk/sdk/face/include/THFaceProperty_i.h
@@ -0,0 +1,153 @@
+#ifndef THFACEPROP_I_H
+#define THFACEPROP_I_H
+
+#include "THFaceImage_i.h"
+
+/*
+* ============================================================================
+*  Name     : THFaceProperty_i.h
+*  Part of  : Face Property (THFaceProperty) SDK
+*  Created  : 7.8.2016 by XXX
+*  Description:
+*     THFaceProp_i.h -  Face Property (THFaceProperty) SDK header file
+*  Version  : 1.0.0
+*  Copyright: All Rights Reserved by XXXX
+*  Revision:
+* ============================================================================
+*/
+
+struct THFP_Result_V1
+{
+	int gender;//1-male,0-female
+	int age;//range[0-100]
+	int beauty_level;//range[0-100]
+	int smile_level;//range[0-100]
+};
+
+struct THFP_Result_V2
+{
+	int gender;//1-male,0-female
+	int age;//range[0-100]
+	int race; //[1-white,2-yellow,3-black]
+	int beauty_level;//range[0-100]
+	int smile_level;//range[0-100]
+};
+
+#define THFACEPROP_API extern "C"
+
+THFACEPROP_API int		THFP_Create(short nChannelNum);
+/*
+ The THFP_Create function will initialize the algorithm engine module
+
+ Parameters:
+	nChannelNum[input],algorithm channel num,for multi-thread mode,one thread uses one channel
+ Return Values:
+	If the function succeeds, the return value is valid channel number.
+	If the function fails, the return value is zero or negative;
+	error code:
+		-99,invalid license.
+ Remarks: 
+	This function only can be called one time at program initialization.
+*/
+
+THFACEPROP_API void		THFP_Release();
+/*
+The THFP_Release function will release the algorithm engine module
+
+Parameters:
+	No parameter.
+Return Values:
+	No return value.
+Remarks:
+	This function only can be called one time at program exit.
+*/
+
+THFACEPROP_API int		THFP_Execute_V1(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, THFI_FacePos* ptfp, THFP_Result_V1* pResult);
+/*
+The THFP_Execute_V1 function executes face property analysis.
+
+Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],point to an image buffer,BGR format.
+	nWidth[input],the image width.
+	nHeight[input],the image height.
+	ptfp[input],the facial data of a face.
+	pResult[output],the face property result
+Return Values:
+	If the function succeeds, the return value is 0.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,pBGR,ptfp,pResult is NULL
+		-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+	No remark.
+*/
+THFACEPROP_API int		THFP_Execute_1N_V1(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, THFI_FacePos* ptfps, THFP_Result_V1* pResults,int nFaceCount);
+/*
+The THFP_Execute_1N_V1 function executes face property analysis.
+
+Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],point to an image buffer,BGR format.
+	nWidth[input],the image width.
+	nHeight[input],the image height.
+	ptfps[input],the facial data of multiple faces
+	pResults[output],the face property results of multiple faces
+	nFaceCount[input],the face number
+Return Values:
+	If the function succeeds, the return value is 0.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,pBGR,ptfps,pResults is NULL,OR nFaceCount is less than 1
+		-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+	No remark.
+*/
+THFACEPROP_API int		THFP_Execute_V2(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, THFI_FacePos* ptfp, THFP_Result_V2* pResult);
+/*
+The THFP_Execute_V2 function executes face property analysis.
+
+Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],point to an image buffer,BGR format.
+	nWidth[input],the image width.
+	nHeight[input],the image height.
+	ptfp[input],the facial data of a face.
+	pResult[output],the face property result
+Return Values:
+	If the function succeeds, the return value is 0.
+	If the function fails, the return value is negative.
+error code:
+	-99,invalid license.
+	-1,pBGR,ptfp,pResult is NULL
+	-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+	No remark.
+*/
+
+THFACEPROP_API int		THFP_Execute_1N_V2(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, THFI_FacePos* ptfps, THFP_Result_V2* pResults, int nFaceCount);
+/*
+The THFP_Execute_1N_V2 function executes face property analysis.
+
+Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],point to an image buffer,BGR format.
+	nWidth[input],the image width.
+	nHeight[input],the image height.
+	ptfps[input],the facial data of multiple faces
+	pResults[output],the face property results of multiple faces
+	nFaceCount[input],the face number
+Return Values:
+	If the function succeeds, the return value is 0.
+	If the function fails, the return value is negative.
+error code:
+	-99,invalid license.
+	-1,pBGR,ptfps,pResults is NULL,OR nFaceCount is less than 1
+	-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+	No remark.
+*/
+
+#endif
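
The property calls reuse the THFI_FacePos produced by the face detector, so the usual pipeline is detect-then-execute. A minimal sketch for a single face using the V2 result struct, assuming THFI_Create and THFP_Create already ran at startup:

#include <cstdio>
#include "THFaceImage_i.h"
#include "THFaceProperty_i.h"

void print_face_properties(BYTE *bgr, int w, int h) {
	THFI_FacePos fp;
	if (THFI_DetectFace(0, bgr, 24, w, h, &fp, 1) < 1) return;   // no face found
	THFP_Result_V2 res;
	if (THFP_Execute_V2(0 /* channel */, bgr, w, h, &fp, &res) == 0)
		printf("gender=%d age=%d race=%d beauty=%d smile=%d\n",
		       res.gender, res.age, res.race, res.beauty_level, res.smile_level);
}
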
diff --git a/gosdk/sdk/face/include/THFaceTracking_i.h b/gosdk/sdk/face/include/THFaceTracking_i.h
new file mode 100644
index 0000000..895631a
--- /dev/null
+++ b/gosdk/sdk/face/include/THFaceTracking_i.h
@@ -0,0 +1,180 @@
+#ifndef THFACETRACKING_I_H
+#define THFACETRACKING_I_H
+
+#include "FiStdDefEx.h"
+
+/*
+* ============================================================================
+*  Name     : THFaceTracking_i.h
+*  Part of  : Face Tracking (THFaceTracking) SDK
+*  Created  : 11.22.2017 by XXX
+*  Description:
+*     THFaceTracking_i.h -   Face Tracking (THFaceTracking) SDK header file
+*  Version  : 1.0.0
+*  Copyright: All Rights Reserved by XXXX
+*  Revision:
+* ============================================================================
+*/
+
+struct FacePose
+{
+	int   yaw;//angle of yaw,from -90 to +90,left is negative,right is postive
+	int   pitch;//angle of pitch,from -90 to +90,up is negative,down is postive
+	int   roll;//angle of roll,from -90 to +90,left is negative,right is postive
+	float confidence;//confidence of face pose(from 0 to 1,0.6 is suggested threshold)
+};
+
+struct THFT_FaceInfo
+{
+	RECT		rcFace;//coordinate of face
+	POINT		ptLeftEye;//coordinate of left eye
+	POINT		ptRightEye;//coordinate of right eye
+	POINT		ptMouth;//coordinate of mouth
+	POINT		ptNose;//coordinate of nose								
+	FacePose	fAngle;//value of face angle
+	int			nQuality;//quality of face(from 0 to 100)
+	BYTE   		pFacialData[8*1024];//facial data
+
+	long		nFaceID;//face tracking id
+
+	THFT_FaceInfo()
+	{
+		memset(&rcFace, 0, sizeof(RECT));
+		memset(&ptLeftEye, 0, sizeof(POINT));
+		memset(&ptRightEye, 0, sizeof(POINT));
+		memset(&ptMouth, 0, sizeof(POINT));
+		memset(&ptNose, 0, sizeof(POINT));
+		memset(&fAngle, 0, sizeof(FacePose));
+		nQuality = 0;
+		memset(pFacialData, 0, 8 * 1024);
+
+		nFaceID = -1;
+	}
+};
+
+struct THFT_Param
+{
+	int nDeviceID;//device id for GPU device.eg:0,1,2,3.....
+
+	int nImageWidth;//image width of video
+	int nImageHeight;//image height of video
+	int nMaxFaceNum;//max face number for tracking
+	int nSampleSize;//down sample size for face detection
+	int nDetectionIntervalFrame;//interval frame number of face detection for face tracking 
+
+	THFT_Param()
+	{
+		nMaxFaceNum = 100;
+		nSampleSize = 640;
+		nDeviceID = 0;
+		nDetectionIntervalFrame = 5;
+	}
+};
+
+#define THFACETRACKING_API extern "C"
+
+
+THFACETRACKING_API int	THFT_Create(short nChannelNum,THFT_Param* pParam);
+/*
+The THFT_Create function will initialize the algorithm engine module
+
+Parameters:
+	nChannelNum[input],algorithm channel num,for multi-thread mode,one thread uses one channel
+	pParam[input],algorithm engine parameter.
+Return Values:
+	If the function succeeds, the return value is valid channel number.
+	If the function fails, the return value is zero or negative;
+error code:
+	-99,invalid license.
+Remarks:
+	This function only can be called one time at program initialization.
+*/
+
+THFACETRACKING_API void	THFT_Release();
+/*
+The THFT_Release function will release the algorithm engine module
+
+Parameters:
+	No parameter.
+Return Values:
+	No return value.
+Remarks:
+	This function only can be called one time at program exit.
+*/
+
+THFACETRACKING_API int	THFT_FaceTracking(short nChannelID, unsigned char* pBGR,THFT_FaceInfo* pFaceInfos);
+/*
+ The THFT_FaceTracking function executes face detection and face tracking
+
+ Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],image data buffer,BGR format.
+	pFaceInfos[output],the facial position information.
+ Return Values:
+	If the function succeeds, the return value is face number.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,nChannelID is invalid or SDK is not initialized
+		-2,image data is invalid,please check function parameter:pBGR
+		-3,pFaceInfos is invalid.
+ Remarks:
+	1.image data buffer(pBGR) size must be (THFT_Param::nImageWidth * THFT_Param::nImageHeight * 3)
+	2.pFaceInfos must be allocated by caller,the memory size is THFT_Param::nMaxFaceNum*sizeof(THFT_FaceInfo).
+	3.if image has face(s),face number is less than or equal to THFT_Param::nMaxFaceNum
+*/
+
+THFACETRACKING_API int	THFT_FaceDetect(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, THFT_FaceInfo* pFaceInfos, int nMaxFaceNums, int nSampleSize);
+/*
+ The THFT_FaceDetect function executes facial detection for an image
+
+ Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],image data buffer,BGR format.
+	nWidth[input],image width.
+	nHeight[input],image height.
+	pFaceInfos[output],the facial position information.
+	nMaxFaceNums[input],max face nums that you want
+	nSampleSize[input],down sample size(image down sample) for detect image,if it is 0,will detect by original image.
+ Return Values:
+	If the function succeeds, the return value is face number.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,nChannelID is invalid or SDK is not initialized
+		-2,image data is invalid,please check function parameter:pBGR,nWidth,nHeight
+		-3,pFaceInfos or nMaxFaceNums is invalid.
+ Remarks:
+	1.image data buffer(pBGR) size must be nWidth*nHeight*3.
+	2.pFaceInfos must be allocated by caller,the memory size is nMaxFaceNums*sizeof(THFT_FaceInfo).
+	3.if image has face(s),face number less than or equal to nMaxFaceNums
+*/
+
+THFACETRACKING_API int	THFT_FaceOnly(short nChannelID, BYTE* pBGR, int nWidth, int nHeight, RECT* pFaces, int nMaxFaceNums, int nSampleSize);
+/*
+ The THFT_FaceOnly function executes face rectangle detection only
+
+ Parameters:
+	nChannelID[input],channel ID(from 0 to nChannelNum-1)
+	pBGR[input],image data buffer,BGR format.
+	nWidth[input],image width.
+	nHeight[input],image height.
+	pFaces[output],the face rectangle
+	nMaxFaceNums[input],max face nums that you want
+	nSampleSize[input],down sample size(image down sample) for detect image,if it is 0,will detect by original image.
+ Return Values:
+	If the function succeeds, the return value is face number.
+	If the function fails, the return value is negative.
+	error code:
+		-99,invalid license.
+		-1,nChannelID is invalid or SDK is not initialized
+		-2,image data is invalid,please check function parameter:pBGR,nWidth,nHeight
+		-3,pFaces or nMaxFaceNums is invalid.
+ Remarks:
+	1.image data buffer(pBGR) size must be nWidth*nHeight*3.
+	2.pFaces must be allocated by caller,the memory size is nMaxFaceNums*sizeof(RECT).
+	3.if image has face(s),face number less than or equal to nMaxFaceNums
+*/
+
+
+#endif
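
THFT_Param fixes the frame size up front, so a tracking channel is created once per stream and then fed consecutive BGR frames. A minimal sketch of that loop, with OpenCV standing in for the real capture source (camera index 0 is a placeholder):

#include <cstdio>
#include <vector>
#include <opencv2/opencv.hpp>
#include "THFaceTracking_i.h"

int main() {
	cv::VideoCapture cap(0);
	cv::Mat frame;
	if (!cap.read(frame)) return 1;

	THFT_Param param;                        // defaults: 100 faces, 640 sample size, 5-frame detect interval
	param.nImageWidth  = frame.cols;
	param.nImageHeight = frame.rows;
	if (THFT_Create(1 /* channels */, &param) <= 0) return 1;

	std::vector<THFT_FaceInfo> faces(param.nMaxFaceNum);   // caller-allocated output buffer
	while (cap.read(frame)) {
		int n = THFT_FaceTracking(0 /* channel */, frame.data, faces.data());
		for (int i = 0; i < n; ++i)
			printf("track %ld quality=%d\n", faces[i].nFaceID, faces[i].nQuality);
	}
	THFT_Release();
	return 0;
}
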
diff --git a/gosdk/sdk/face/include/THFeature_i.h b/gosdk/sdk/face/include/THFeature_i.h
new file mode 100644
index 0000000..3d0f93b
--- /dev/null
+++ b/gosdk/sdk/face/include/THFeature_i.h
@@ -0,0 +1,183 @@
+#ifndef THFEATURE_I_H
+#define THFEATURE_I_H
+
+#include "THFaceImage_i.h"
+
+/*
+* ============================================================================
+*  Name     : THFeature_i.h
+*  Part of  : Face Feature (THFeature) SDK
+*  Created  : 10.18.2016 by xxx
+*  Description:
+*     THFeature_i.h -  Face Feature(THFeature) SDK header file
+*  Version  : 5.0.0
+*  Copyright: All Rights Reserved by XXX
+*  Revision:
+* ============================================================================
+*/
+
+#define THFEATURE_API extern "C"
+
+struct TH_Image_Data
+{
+	BYTE* bgr;//MUST BE bgr format buffer,the size is width*height*3 bytes
+	int width;//image width
+	int height;//image height
+};
+
+struct EF_Param
+{
+	int nDeviceID;//device id for GPU device.eg:0,1,2,3.....
+	EF_Param()
+	{
+		nDeviceID = 0;
+	}
+};
+//////API define//////
+
+THFEATURE_API short EF_Init(int nChannelNum);
+/*
+The EF_Init function will initialize the Face Feature(THFeature) algorithm module
+
+Parameters:
+nChannelNum,the channel number,supports multi-threading,one channel stands for one thread,max value is 32.
+Return Values:
+If the function succeeds, the return value is valid channel number.
+If the function fails, the return value is 0 or negative;
+error code:
+-99,invalid license.
+-1,open file "feadb.db*" error
+-2,check  file "feadb.db*" error
+-3,read  file "feadb.db*" error
+Remarks:
+This function can be called one time at program initialization.
+*/
+
+THFEATURE_API int EF_Size();
+/*
+The EF_Size function will return face feature size.
+
+Parameters:
+No parameter.
+Return Values:
+If the function succeeds, the return value is face feature size.
+If the function fails, the return value is 0 or negative;
+error code:
+-99,invalid license.
+Remarks:
+No remark.
+*/
+
+THFEATURE_API int EF_Extract(short nChannelID, BYTE* pBuf, int nWidth, int nHeight, int nChannel, THFI_FacePos* ptfp, BYTE* pFeature);
+/*
+The EF_Extract function executes face feature extraction from one photo
+
+Parameters:
+nChannelID[input],channel ID(from 0 to nChannelNum-1)
+pBuf[input],point to an image buffer,BGR format.
+nWidth[input],the image width.
+nHeight[input],the image height.
+nChannel[input],image buffer channel,must be 3
+ptfp[input],the facial data of a face.
+pFeature[output],the face feature buffer
+Return Values:
+If the function succeeds, the return value is 1.
+If the function fails, the return value is negative.
+error code:
+-99,invalid license.
+-1,pBuf,ptfp,pFeature is NULL
+-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+No remark.
+*/
+
+THFEATURE_API int EF_Extract_M(short nChannelID, BYTE* pBuf, int nWidth, int nHeight, int nChannel, THFI_FacePos* ptfps, BYTE* pFeatures, int nFaceNum);
+/*
+The EF_Extract_M function executes face feature extraction for multiple faces from one photo
+
+Parameters:
+nChannelID[input],channel ID(from 0 to nChannelNum-1)
+pBuf[input],point to an image buffer,BGR format.
+nWidth[input],the image width.
+nHeight[input],the image height.
+nChannel[input],image buffer channel,must be 3
+ptfps[input],the facial data of multiple faces
+pFeatures[output],the face feature buffer for multiple faces
+nFaceNum[input],the face number
+Return Values:
+If the function succeeds, the return value is 1.
+If the function fails, the return value is 0 or negative.
+error code:
+-99,invalid license.
+-1,pBuf,ptfps,pFeatures is NULL
+-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+No remark.
+*/
+
+THFEATURE_API int EF_Extracts(short nChannelID, TH_Image_Data* ptids, THFI_FacePos* ptfps, BYTE* pFeatures, int nNum);
+/*
+The EF_Extracts function executes face feature extraction for multiple faces from multiple photos
+
+Parameters:
+nChannelID[input],channel ID(from 0 to nChannelNum-1)
+ptids[input],the image data list of multiple photos
+ptfps[input],the facial data list of multiple photos(one image data-one facial data)
+pFeatures[output],the face feature buffer for multiple faces
+nNum[input],the image data number
+Return Values:
+If the function succeeds, the return value is 1.
+If the function fails, the return value is 0 or negative.
+error code:
+-99,invalid license.
+-1,ptids,ptfps,pFeatures is NULL
+-2,nChannelID is invalid or SDK is not initialized
+Remarks:
+No remark.
+*/
+
+THFEATURE_API float EF_Compare(BYTE* pFeature1, BYTE* pFeature2);
+/*
+The EF_Compare function compares two face features.
+
+Parameters:
+pFeature1[input],point to one face feature buffer.
+pFeature2[input],point to another face feature buffer.
+Return Values:
+the return value is the similarity of the two face features.
+Remarks:
+No remark.
+*/
+
+THFEATURE_API void EF_Release();
+/*
+The EF_Release function will release the Face Feature (THFeature) algorithm module
+
+Parameters:
+No parameter.
+Return Values:
+No return value.
+Remarks:
+This function can be called one time at program Un-Initialization.
+*/
+
+THFEATURE_API short EF_Init_Ex(int nChannelNum, EF_Param* pParam = NULL);
+/*
+The EF_Init_Ex function will initialize the Face Feature(THFeature) algorithm module,only for GPU version
+
+Parameters:
+nChannelNum,the channel number,supports multi-threading,one channel stands for one thread,max value is 32.
+pParam,initialize parameter
+Return Values:
+If the function succeeds, the return value is valid channel number.
+If the function fails, the return value is 0 or negative;
+error code:
+-99,invalid license.
+-1,open file "feadb.db*" error
+-2,check  file "feadb.db*" error
+-3,read  file "feadb.db*" error
+Remarks:
+This function can be called one time at program initialization.
+*/
+
+#endif
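
A 1:1 comparison with the API above amounts to feature extraction on both images followed by EF_Compare; EF_Size tells the caller how large the feature buffers must be. A minimal sketch, assuming EF_Init already ran and that the BGR buffers and THFI_FacePos records were produced by THFaceImage beforehand:

#include <vector>
#include "THFaceImage_i.h"
#include "THFeature_i.h"

// Returns the similarity score, or -1 if extraction fails.
float compare_faces(BYTE *bgr1, int w1, int h1, THFI_FacePos *fp1,
                    BYTE *bgr2, int w2, int h2, THFI_FacePos *fp2) {
	const int size = EF_Size();                        // feature length in bytes
	std::vector<BYTE> f1(size), f2(size);
	if (EF_Extract(0, bgr1, w1, h1, 3, fp1, f1.data()) != 1) return -1.f;
	if (EF_Extract(0, bgr2, w2, h2, 3, fp2, f2.data()) != 1) return -1.f;
	return EF_Compare(f1.data(), f2.data());
}
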
diff --git a/gosdk/sdk/face/readme.txt b/gosdk/sdk/face/readme.txt
new file mode 100644
index 0000000..31f4a53
--- /dev/null
+++ b/gosdk/sdk/face/readme.txt
@@ -0,0 +1 @@
+Merge of Face-SDK-CUDA-Linux64V7.0.3 and FaceTracking-SDK-CUDA-Linux64 V1.0.0-timeout2018
diff --git a/gosdk/wsdk.go b/gosdk/wsdk.go
new file mode 100644
index 0000000..ff7cc2a
--- /dev/null
+++ b/gosdk/wsdk.go
@@ -0,0 +1,41 @@
+package gosdk
+
+import (
+	"C"
+)
+
+// func (s *Goapi) NewAPIFaceFeature(thread_max, gpu_index int) {
+// 	detectMinFaces := 20
+// 	detectRollAngle := 60
+// 	s.NewAPIFaceDetector(thread_max, gpu_index, detectMinFaces, detectRollAngle)
+
+// 	s.NewAPIFaceProperty(thread_max)
+
+// 	s.NewAPIFaceExtractor(thread_max, gpu_index)
+// }
+
+// func (s *Goapi) FaceFeature(d []byte, w, h, channel int) []interface{} {
+// 	cres := s.APIFaceDetect(d, w, h, channel)
+// 	if cres == nil {
+// 		return nil
+// 	}
+// 	gores := (*CResult)(unsafe.Pointer(cres))
+
+// 	count := gores.Count
+// 	// count := getResultCount(det)
+
+// 	faces := (*CFacePos)(unsafe.Pointer(gores.Data))
+// 	fmt.Printf("det count %d", count)
+// 	// result := make([]FaceDetectResult, count)
+
+// 	// for i := 0; i < count; i++ {
+// 	// 	ext := s.APIFaceExtract(det, i, d, w, h, channel)
+// 	// 	if ext == nil {
+// 	// 		continue
+// 	// 	}
+
+// 	// 	res := FaceDetectResult{}
+// 	// }
+
+// 	return nil
+// }

--
Gitblit v1.8.0