commit 5c80d1bd1f
@@ -1,5 +1,3 @@
 if(WITH_AVX)
-    cc_library(activation_functions SRCS hl_cpu_functions.cc hl_avx_functions.cc)
-else()
-    cc_library(activation_functions SRCS hl_cpu_functions.cc)
+    cc_library(activation_functions SRCS avx_functions.cc)
 endif()
@@ -0,0 +1,170 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>
#include "paddle/platform/hostdevice.h"

#ifdef __AVX__
#include <immintrin.h>
#endif

namespace paddle {
namespace operators {
namespace math {
namespace detail {

#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0

namespace forward {

template <typename T>
DEVICE T Identity(const T a) {
  return a;
}

template <typename T>
DEVICE T Relu(const T a) {
  return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0);
}

template <typename T>
DEVICE T Sigmoid(const T a) {
  const T min = SIGMOID_THRESHOLD_MIN;
  const T max = SIGMOID_THRESHOLD_MAX;
  T tmp = (a < min) ? min : ((a > max) ? max : a);
  return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp));
}

template <typename T>
DEVICE T Tanh(const T a) {
  T tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

}  // namespace forward

namespace backward {

template <typename T>
DEVICE T Identity(const T a, const T b) {
  return a;
}

template <typename T>
DEVICE T Relu(const T a, const T b) {
  return a * (b > 0.0 ? 1.0 : 0.0);
}

template <typename T>
DEVICE T Sigmoid(const T a, const T b) {
  return a * b * (1.0 - b);
}

template <typename T>
DEVICE T Tanh(const T a, const T b) {
  return a * (1.0 - b * b);
}

}  // namespace backward

template <typename T>
struct Active {
  typedef T (*Act)(T);
  typedef T (*ActGrad)(T, T);
};

static DEVICE Active<float>::Act kActFloat[] = {
    &forward::Sigmoid<float>, &forward::Relu<float>, &forward::Tanh<float>,
    &forward::Identity<float>};

static DEVICE Active<float>::ActGrad kActGradFloat[] = {
    &backward::Sigmoid<float>, &backward::Relu<float>, &backward::Tanh<float>,
    &backward::Identity<float>};

static DEVICE Active<double>::Act kActDouble[] = {
    &forward::Sigmoid<double>, &forward::Relu<double>, &forward::Tanh<double>,
    &forward::Identity<double>};

static DEVICE Active<double>::ActGrad kActGradDouble[] = {
    &backward::Sigmoid<double>, &backward::Relu<double>,
    &backward::Tanh<double>, &backward::Identity<double>};

namespace forward {
inline DEVICE float activation(float a, int index) {
  return kActFloat[index](a);
}

inline DEVICE double activation(double a, int index) {
  return kActDouble[index](a);
}

}  // namespace forward

namespace backward {
inline DEVICE float activation(float a, float b, int index) {
  return kActGradFloat[index](a, b);
}

inline DEVICE double activation(double a, double b, int index) {
  return kActGradDouble[index](a, b);
}
}  // namespace backward

#ifdef __AVX__
namespace forward {
namespace avx {
__m256 Relu(const __m256 a);
__m256 Sigmoid(const __m256 a);
__m256 Tanh(const __m256 a);
__m256 Identity(const __m256 a);
}  // namespace avx
}  // namespace forward

namespace backward {
namespace avx {
__m256 Relu(const __m256 a, const __m256 b);
__m256 Sigmoid(const __m256 a, const __m256 b);
__m256 Tanh(const __m256 a, const __m256 b);
__m256 Identity(const __m256 a, const __m256 b);
}  // namespace avx
}  // namespace backward

static Active<__m256>::Act kActAvx[] = {
    &forward::avx::Sigmoid, &forward::avx::Relu, &forward::avx::Tanh,
    &forward::avx::Identity};

static Active<__m256>::ActGrad kActGradAvx[] = {
    &backward::avx::Sigmoid, &backward::avx::Relu, &backward::avx::Tanh,
    &backward::avx::Identity};

namespace forward {
inline __m256 activation(__m256 a, int index) { return kActAvx[index](a); }
}  // namespace forward

namespace backward {
inline __m256 activation(__m256 a, __m256 b, int index) {
  return kActGradAvx[index](a, b);
}
}  // namespace backward

#endif

}  // namespace detail
}  // namespace math
}  // namespace operators
}  // namespace paddle
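For orientation, here is a usage sketch of the index-based dispatch the new header exposes. It is not part of the commit: the include path and the index constant are assumptions, derived from the array order above (0 = Sigmoid, 1 = Relu, 2 = Tanh, 3 = Identity).

// Hypothetical host-side caller; assumes hostdevice.h makes DEVICE a no-op
// when compiling without CUDA, and that the header path below is correct.
#include <cstdio>

#include "paddle/operators/math/detail/activation_functions.h"  // assumed path

int main() {
  namespace detail = paddle::operators::math::detail;
  const int kTanhIndex = 2;  // position of Tanh in kActFloat / kActGradFloat
  float y = detail::forward::activation(0.5f, kTanhIndex);       // tanh(0.5)
  float dy = detail::backward::activation(1.0f, y, kTanhIndex);  // 1 - y*y
  std::printf("y = %f, dy = %f\n", y, dy);
  return 0;
}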
@@ -1,188 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef HL_ACTIVATION_FUNCTIONS_H_
#define HL_ACTIVATION_FUNCTIONS_H_

#include "hl_functions.h"
#include "paddle/operators/math/lstm_compute.h"

/**
 * Active functions: sigmoid, relu, tanh and linear.
 */
#define FLOAT_ACTIVE_FUNCTION                                   \
  {                                                             \
    hppl::typef::sigmoid, hppl::typef::relu, hppl::typef::tanh, \
        hppl::typef::linear                                     \
  }

#define DOUBLE_ACTIVE_FUNCTION                                  \
  {                                                             \
    hppl::typed::sigmoid, hppl::typed::relu, hppl::typed::tanh, \
        hppl::typed::linear                                     \
  }

#define AVX_ACTIVE_FUNCTION \
  { hppl::sigmoid, hppl::relu, hppl::tanh, hppl::linear }

namespace hppl {

using activation_mode_t = paddle::operators::math::activation_mode_t;

/**
 * Hppl supports sigmoid, relu, tanh, linear active functions
 * for neural networks' forward and backward activation.
 */
template <class T>
class Active {
 public:
  typedef T (*forward)(T);
  typedef T (*backward)(T, T);
};

template <typename T>
struct ForwardActType;

template <>
struct ForwardActType<float> {
  using type = Active<float>::forward;
};

template <>
struct ForwardActType<double> {
  using type = Active<double>::forward;
};

template <typename T>
struct BackwardActType;

template <>
struct BackwardActType<float> {
  using type = Active<float>::backward;
};

template <>
struct BackwardActType<double> {
  using type = Active<double>::backward;
};

#ifdef __NVCC__
namespace gpu {
static __device__ Active<float>::forward forward[] = FLOAT_ACTIVE_FUNCTION;
static __device__ Active<float>::backward backward[] = FLOAT_ACTIVE_FUNCTION;

static __device__ Active<double>::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION;
static __device__ Active<double>::backward backward_d[] =
    DOUBLE_ACTIVE_FUNCTION;

template <typename T>
struct ForwardAct {
  __device__ typename ForwardActType<T>::type operator()(
      activation_mode_t type);
};

template <>
struct ForwardAct<float> {
  __device__ ForwardActType<float>::type operator()(activation_mode_t type) {
    return forward[type];
  }
};

template <>
struct ForwardAct<double> {
  __device__ ForwardActType<double>::type operator()(activation_mode_t type) {
    return forward_d[type];
  }
};

template <typename T>
struct BackwardAct {
  __device__ typename BackwardActType<T>::type operator()(
      activation_mode_t type);
};

template <>
struct BackwardAct<float> {
  __device__ BackwardActType<float>::type operator()(activation_mode_t type) {
    return backward[type];
  }
};

template <>
struct BackwardAct<double> {
  __device__ BackwardActType<double>::type operator()(activation_mode_t type) {
    return backward_d[type];
  }
};

}  // namespace gpu
#else
namespace cpu {
static Active<float>::forward forward[] = FLOAT_ACTIVE_FUNCTION;
static Active<float>::backward backward[] = FLOAT_ACTIVE_FUNCTION;

static Active<double>::forward forward_d[] = DOUBLE_ACTIVE_FUNCTION;
static Active<double>::backward backward_d[] = DOUBLE_ACTIVE_FUNCTION;

template <typename T>
struct ForwardAct {
  typename ForwardActType<T>::type operator()(activation_mode_t type);
};

template <>
struct ForwardAct<float> {
  ForwardActType<float>::type operator()(activation_mode_t type) {
    return forward[type];
  }
};

template <>
struct ForwardAct<double> {
  ForwardActType<double>::type operator()(activation_mode_t type) {
    return forward_d[type];
  }
};

template <typename T>
struct BackwardAct {
  typename BackwardActType<T>::type operator()(activation_mode_t type);
};

template <>
struct BackwardAct<float> {
  BackwardActType<float>::type operator()(activation_mode_t type) {
    return backward[type];
  }
};

template <>
struct BackwardAct<double> {
  BackwardActType<double>::type operator()(activation_mode_t type) {
    return backward_d[type];
  }
};

}  // namespace cpu

#ifdef __AVX__
namespace avx {
static Active<__m256>::forward forward[] = AVX_ACTIVE_FUNCTION;
static Active<__m256>::backward backward[] = AVX_ACTIVE_FUNCTION;
}  // namespace avx
#endif
#endif

}  // namespace hppl

#endif  // HL_ACTIVATION_FUNCTIONS_H_
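For contrast, this is roughly how the removed functor-based lookup was used on the host side. A hedged sketch only: the HL_ACTIVATION_TANH constant is assumed to belong to the activation_mode_t enum pulled in from lstm_compute.h, and is not shown in this diff.

// Hypothetical caller of the removed API; the enum constant is an assumption.
#include <cstdio>

#include "hl_activation_functions.h"

int main() {
  // Look up the float tanh through the CPU functor, then apply it.
  hppl::Active<float>::forward act =
      hppl::cpu::ForwardAct<float>()(HL_ACTIVATION_TANH);
  std::printf("tanh(0.5) = %f\n", act(0.5f));
  return 0;
}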
@@ -1,32 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef HL_AVX_FUNCTIONS_H_
#define HL_AVX_FUNCTIONS_H_

#include <immintrin.h>

namespace hppl {
__m256 relu(const __m256 a);
__m256 sigmoid(const __m256 a);
__m256 tanh(const __m256 a);
__m256 linear(const __m256 a);

__m256 relu(const __m256 a, const __m256 b);
__m256 sigmoid(const __m256 a, const __m256 b);
__m256 tanh(const __m256 a, const __m256 b);
__m256 linear(const __m256 a, const __m256 b);
}  // namespace hppl

#endif  // HL_AVX_FUNCTIONS_H_
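The declarations above are implemented in a separate translation unit (hl_avx_functions.cc, per the CMake hunk), which this diff view does not show. As a hedged sketch only, one plausible implementation of the relu pair with standard AVX intrinsics:

// Sketch, not the actual contents of hl_avx_functions.cc.
#include <immintrin.h>

namespace hppl {
// Forward: max(a, 0) across all eight packed floats.
__m256 relu(const __m256 a) { return _mm256_max_ps(a, _mm256_setzero_ps()); }

// Backward: grad * (b > 0 ? 1 : 0). The ordered greater-than compare yields
// an all-ones lane mask where b > 0, so a bitwise AND keeps exactly those
// lanes of the incoming gradient.
__m256 relu(const __m256 a, const __m256 b) {
  __m256 mask = _mm256_cmp_ps(b, _mm256_setzero_ps(), _CMP_GT_OS);
  return _mm256_and_ps(a, mask);
}
}  // namespace hppl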
@@ -1,89 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <math.h>
#include "hl_functions.h"

namespace hppl {
namespace typef {

float relu(const float a) {
  return a > static_cast<float>(0.0) ? a : static_cast<float>(0.0);
}

float sigmoid(const float a) {
  const float min = SIGMOID_THRESHOLD_MIN;
  const float max = SIGMOID_THRESHOLD_MAX;
  float tmp = (a < min) ? min : ((a > max) ? max : a);
  return static_cast<float>(1.0) / (static_cast<float>(1.0) + exp(-tmp));
}

float tanh(const float a) {
  float tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

float linear(const float a) { return a; }

float relu(const float a, const float b) { return a * (b > 0.0 ? 1.0 : 0.0); }

float sigmoid(const float a, const float b) {
  return a * b * (static_cast<float>(1) - b);
}

float tanh(const float a, const float b) {
  return a * (static_cast<float>(1) - b * b);
}

float linear(const float a, const float b) { return a; }

}  // namespace typef

namespace typed {
double relu(const double a) {
  return a > static_cast<double>(0.0) ? a : static_cast<double>(0.0);
}

double sigmoid(const double a) {
  const double min = SIGMOID_THRESHOLD_MIN;
  const double max = SIGMOID_THRESHOLD_MAX;
  double tmp = (a < min) ? min : ((a > max) ? max : a);
  return static_cast<double>(1.0) / (static_cast<double>(1.0) + exp(-tmp));
}

double tanh(const double a) {
  double tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

double linear(const double a) { return a; }

double relu(const double a, const double b) {
  return a * (b > 0.0 ? 1.0 : 0.0);
}

double sigmoid(const double a, const double b) {
  return a * b * (static_cast<double>(1) - b);
}

double tanh(const double a, const double b) {
  return a * (static_cast<double>(1) - b * b);
}

double linear(const double a, const double b) { return a; }

}  // namespace typed
}  // namespace hppl
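The tanh above leans on the identity tanh(a) = 2 / (1 + e^{-2a}) - 1; tmp = -2a is clamped at EXP_MAX_INPUT so the exp() call cannot overflow for large negative a. A self-contained sanity check of that identity (a sketch that restates the formula rather than linking the file above):

#include <cmath>
#include <cstdio>

// Standalone restatement of the clamped formula used above.
static float tanh_via_exp(float a) {
  float tmp = -2.0f * a;
  tmp = (tmp > 40.0f) ? 40.0f : tmp;  // EXP_MAX_INPUT clamp
  return (2.0f / (1.0f + std::exp(tmp))) - 1.0f;
}

int main() {
  for (float a : {-30.0f, -1.0f, 0.0f, 0.5f, 3.0f}) {
    std::printf("a=%6.2f  formula=%+.6f  std::tanh=%+.6f\n", a,
                tanh_via_exp(a), std::tanh(a));
  }
  return 0;
}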
@@ -1,71 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef HL_FUNCTIONS_H_
#define HL_FUNCTIONS_H_

/**
 * sigmoid threshold minimum
 */
#define SIGMOID_THRESHOLD_MIN -40.0

/**
 * sigmoid threshold maximum
 */
#define SIGMOID_THRESHOLD_MAX 13.0

/**
 * The maximum input value for exp, used to avoid overflow problems;
 * currently only used by the tanh function.
 */
#define EXP_MAX_INPUT 40.0

#ifndef __NVCC__
namespace hppl {
namespace typef {
float relu(const float a);
float sigmoid(const float a);
float tanh(const float a);
float linear(const float a);

float relu(const float a, const float b);
float sigmoid(const float a, const float b);
float tanh(const float a, const float b);
float linear(const float a, const float b);

}  // namespace typef

namespace typed {
double relu(const double a);
double sigmoid(const double a);
double tanh(const double a);
double linear(const double a);

double relu(const double a, const double b);
double sigmoid(const double a, const double b);
double tanh(const double a, const double b);
double linear(const double a, const double b);
}  // namespace typed

}  // namespace hppl

#ifdef __AVX__
#include "hl_avx_functions.h"
#endif

#else
#include "hl_gpu_functions.h"
#endif

#endif  // HL_FUNCTIONS_H_
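A quick numeric look at why these constants are safe saturation points (standalone sketch using plain libm, not part of the codebase): sigmoid has numerically flattened out well before either threshold, and e^40 is still far below single-precision range.

#include <cmath>
#include <cstdio>

int main() {
  const double lo = -40.0;  // SIGMOID_THRESHOLD_MIN
  const double hi = 13.0;   // SIGMOID_THRESHOLD_MAX
  std::printf("sigmoid(%g) = %.3e  (saturated near 0)\n", lo,
              1.0 / (1.0 + std::exp(-lo)));
  std::printf("sigmoid(%g)  = %.9f (saturated near 1)\n", hi,
              1.0 / (1.0 + std::exp(-hi)));
  // EXP_MAX_INPUT = 40: exp(40) ~ 2.35e17, comfortably below FLT_MAX ~ 3.4e38.
  std::printf("exp(40.0)    = %.3e\n", std::exp(40.0));
  return 0;
}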
@@ -1,93 +0,0 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef HL_GPU_FUNCTIONS_CUH_
#define HL_GPU_FUNCTIONS_CUH_

#include "hl_base.h"

namespace hppl {
namespace typef {

__device__ static float relu(const float a) { return a > 0.0f ? a : 0.0f; }

__device__ static float sigmoid(const float a) {
  const float min = SIGMOID_THRESHOLD_MIN;
  const float max = SIGMOID_THRESHOLD_MAX;
  float tmp = (a < min) ? min : ((a > max) ? max : a);
  return __fdividef(1.0f, 1.0f + __expf(-tmp));
}

__device__ static float tanh(const float a) {
  float tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return __fdividef(2.0f, (1.0f + __expf(tmp))) - 1.0f;
}

__device__ static float linear(const float a) { return a; }

__device__ static float relu(const float a, const float b) {
  return a * (b > 0.0f ? 1.0f : 0.0f);
}

__device__ static float sigmoid(const float a, const float b) {
  return a * b * (1.0f - b);
}

__device__ static float tanh(const float a, const float b) {
  return a * (1.0f - b * b);
}

__device__ static float linear(const float a, const float b) { return a; }

}  // namespace typef

namespace typed {

__device__ static double relu(const double a) { return a > 0.0 ? a : 0.0; }

__device__ static double sigmoid(const double a) {
  const double min = SIGMOID_THRESHOLD_MIN;
  const double max = SIGMOID_THRESHOLD_MAX;
  double tmp = (a < min) ? min : ((a > max) ? max : a);
  return 1.0 / (1.0 + exp(-tmp));
}

__device__ static double tanh(const double a) {
  double tmp = -2.0 * a;
  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  return (2.0 / (1.0 + exp(tmp))) - 1.0;
}

__device__ static double linear(const double a) { return a; }

__device__ static double relu(const double a, const double b) {
  return a * (b > 0.0 ? 1.0 : 0.0);
}

__device__ static double sigmoid(const double a, const double b) {
  return a * b * (1 - b);
}

__device__ static double tanh(const double a, const double b) {
  return a * (1.0 - b * b);
}

__device__ static double linear(const double a, const double b) { return a; }

}  // namespace typed

}  // namespace hppl

#endif  // HL_GPU_FUNCTIONS_CUH_