From 168af40fe9a3cc81c6ee16b3e81f154780c36bdb Mon Sep 17 00:00:00 2001
From: Scheaven <xuepengqiang>
Date: Thu, 03 Jun 2021 15:03:27 +0800
Subject: [PATCH] up new v4

---
 lib/detecter_tools/darknet/activation_layer.c |  126 +++++++++++++++++++++---------------------
 1 file changed, 63 insertions(+), 63 deletions(-)

diff --git a/lib/detecter_tools/darknet/activation_layer.c b/lib/detecter_tools/darknet/activation_layer.c
index 5db10d8..4383d7e 100644
--- a/lib/detecter_tools/darknet/activation_layer.c
+++ b/lib/detecter_tools/darknet/activation_layer.c
@@ -1,63 +1,63 @@
-#include "activation_layer.h"
-#include "utils.h"
-#include "dark_cuda.h"
-#include "blas.h"
-#include "gemm.h"
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
-{
-    layer l = { (LAYER_TYPE)0 };
-    l.type = ACTIVE;
-
-    l.inputs = inputs;
-    l.outputs = inputs;
-    l.batch=batch;
-
-    l.output = (float*)xcalloc(batch * inputs, sizeof(float));
-    l.delta = (float*)xcalloc(batch * inputs, sizeof(float));
-
-    l.forward = forward_activation_layer;
-    l.backward = backward_activation_layer;
-#ifdef GPU
-    l.forward_gpu = forward_activation_layer_gpu;
-    l.backward_gpu = backward_activation_layer_gpu;
-
-    l.output_gpu = cuda_make_array(l.output, inputs*batch);
-    l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
-#endif
-    l.activation = activation;
-    fprintf(stderr, "Activation Layer: %d inputs\n", inputs);
-    return l;
-}
-
-void forward_activation_layer(layer l, network_state state)
-{
-    copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
-    activate_array(l.output, l.outputs*l.batch, l.activation);
-}
-
-void backward_activation_layer(layer l, network_state state)
-{
-    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
-    copy_cpu(l.outputs*l.batch, l.delta, 1, state.delta, 1);
-}
-
-#ifdef GPU
-
-void forward_activation_layer_gpu(layer l, network_state state)
-{
-    copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
-    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
-}
-
-void backward_activation_layer_gpu(layer l, network_state state)
-{
-    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
-    copy_ongpu(l.outputs*l.batch, l.delta_gpu, 1, state.delta, 1);
-}
-#endif
+#include "activation_layer.h"
+#include "utils.h"
+#include "dark_cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
+{
+    layer l = { (LAYER_TYPE)0 };
+    l.type = ACTIVE;
+
+    l.inputs = inputs;
+    l.outputs = inputs;
+    l.batch=batch;
+
+    l.output = (float*)xcalloc(batch * inputs, sizeof(float));
+    l.delta = (float*)xcalloc(batch * inputs, sizeof(float));
+
+    l.forward = forward_activation_layer;
+    l.backward = backward_activation_layer;
+#ifdef GPU
+    l.forward_gpu = forward_activation_layer_gpu;
+    l.backward_gpu = backward_activation_layer_gpu;
+
+    l.output_gpu = cuda_make_array(l.output, inputs*batch);
+    l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+#endif
+    l.activation = activation;
+    fprintf(stderr, "Activation Layer: %d inputs\n", inputs);
+    return l;
+}
+
+void forward_activation_layer(layer l, network_state state)
+{
+    copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
+    activate_array(l.output, l.outputs*l.batch, l.activation);
+}
+
+void backward_activation_layer(layer l, network_state state)
+{
+    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+    copy_cpu(l.outputs*l.batch, l.delta, 1, state.delta, 1);
+}
+
+#ifdef GPU
+
+void forward_activation_layer_gpu(layer l, network_state state)
+{
+    copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
+    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
+}
+
+void backward_activation_layer_gpu(layer l, network_state state)
+{
+    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+    copy_ongpu(l.outputs*l.batch, l.delta_gpu, 1, state.delta, 1);
+}
+#endif

--
Gitblit v1.8.0