@@ -25,10 +25,6 @@ limitations under the License. */
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

#ifdef __AVX__
#include <immintrin.h>
#endif

namespace paddle {
namespace operators {
namespace math {
@@ -235,154 +231,6 @@ REGISTER_JITKERNEL(vexp, VExpKernel);
REGISTER_JITKERNEL(vsigmoid, VSigmoidKernel);
REGISTER_JITKERNEL(vtanh, VTanhKernel);

namespace detail {

#ifdef __AVX__

#define ALIGN32 __attribute__((aligned(32)))

#define _PS256_CONST(Name, Val)                                      \
  static const float _ps256_##Name[8] ALIGN32 = {Val, Val, Val, Val, \
                                                 Val, Val, Val, Val}

#define _PI256_CONST(Name, Val)                                    \
  static const int _pi256_##Name[8] ALIGN32 = {Val, Val, Val, Val, \
                                               Val, Val, Val, Val}
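
// Clamping limits (exp_hi / exp_lo), log2(e), and Cephes-style polynomial
// coefficients used by the single-precision exp() approximation below.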
_PI256_CONST(0x7f, 0x7f);
_PS256_CONST(one, 1.f);
_PS256_CONST(0p5, 0.5f);
_PS256_CONST(exp_hi, 88.3762626647949f);
_PS256_CONST(exp_lo, -88.3762626647949f);
_PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS256_CONST(cephes_exp_C1, 0.693359375);
_PS256_CONST(cephes_exp_C2, -2.12194440e-4);
_PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
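
// View a 256-bit integer register as two 128-bit halves so that AVX2-only
// integer instructions can be emulated with SSE2 on AVX-only hardware.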
typedef union imm_xmm_union {
  __m256i imm;
  __m128i xmm[2];
} imm_xmm_union;
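
// Move data between one __m256i and its two __m128i halves through the union
// above; the ALIGN32 temporary keeps the stack copy 32-byte aligned.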
#define COPY_IMM_TO_XMM(imm_, xmm0_, xmm1_) \
  {                                         \
    imm_xmm_union u ALIGN32;                \
    u.imm = imm_;                           \
    xmm0_ = u.xmm[0];                       \
    xmm1_ = u.xmm[1];                       \
  }

#define COPY_XMM_TO_IMM(xmm0_, xmm1_, imm_) \
  {                                         \
    imm_xmm_union u ALIGN32;                \
    u.xmm[0] = xmm0_;                       \
    u.xmm[1] = xmm1_;                       \
    imm_ = u.imm;                           \
  }
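
// Generate avx2_mm256_##fn wrappers that emulate an AVX2 bit-shift or integer
// op by applying the corresponding SSE2 instruction to each 128-bit half.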
#define AVX2_BITOP_USING_SSE2(fn)                           \
  static inline __m256i avx2_mm256_##fn(__m256i x, int y) { \
    /* use SSE2 to perform the AVX2 bitop */                \
    __m128i x1, x2;                                         \
    __m256i ret;                                            \
    COPY_IMM_TO_XMM(x, x1, x2);                             \
    x1 = _mm_##fn(x1, y);                                   \
    x2 = _mm_##fn(x2, y);                                   \
    COPY_XMM_TO_IMM(x1, x2, ret);                           \
    return ret;                                             \
  }

#define AVX2_INTOP_USING_SSE2(fn)                               \
  static inline __m256i avx2_mm256_##fn(__m256i x, __m256i y) { \
    /* use SSE2 to perform the AVX2 integer operation */        \
    __m128i x1, x2;                                             \
    __m128i y1, y2;                                             \
    __m256i ret;                                                \
    COPY_IMM_TO_XMM(x, x1, x2);                                 \
    COPY_IMM_TO_XMM(y, y1, y2);                                 \
    x1 = _mm_##fn(x1, y1);                                      \
    x2 = _mm_##fn(x2, y2);                                      \
    COPY_XMM_TO_IMM(x1, x2, ret);                               \
    return ret;                                                 \
  }
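
// Instantiate only the two fallbacks ExpAVX needs: a 32-bit left shift and a
// 32-bit integer add.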
AVX2_BITOP_USING_SSE2(slli_epi32);
AVX2_INTOP_USING_SSE2(add_epi32);
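
// Shared body of the AVX / AVX2 exp kernels: clamp x to [exp_lo, exp_hi],
// write exp(x) as 2^n * exp(g) with n ~= round(x * log2(e)), evaluate a
// Cephes-style polynomial for exp(g) in y, and leave n in imm0 so each caller
// can build 2^n with whatever integer instructions its ISA provides.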
#define AVXEXP_BASE                                                           \
  __m256 tmp = _mm256_setzero_ps(), fx;                                       \
  __m256 one = *reinterpret_cast<const __m256*>(_ps256_one);                  \
  __m256i imm0;                                                               \
  x = _mm256_min_ps(x, *reinterpret_cast<const __m256*>(_ps256_exp_hi));      \
  x = _mm256_max_ps(x, *reinterpret_cast<const __m256*>(_ps256_exp_lo));      \
  /* express exp(x) as exp(g + n*log(2)) */                                   \
  fx = _mm256_mul_ps(x,                                                       \
                     *reinterpret_cast<const __m256*>(_ps256_cephes_LOG2EF)); \
  fx = _mm256_add_ps(fx, *reinterpret_cast<const __m256*>(_ps256_0p5));       \
  tmp = _mm256_floor_ps(fx);                                                  \
  /* if greater, subtract 1 */                                                \
  __m256 mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);                           \
  mask = _mm256_and_ps(mask, one);                                            \
  fx = _mm256_sub_ps(tmp, mask);                                              \
  tmp = _mm256_mul_ps(fx,                                                     \
                      *reinterpret_cast<const __m256*>(_ps256_cephes_exp_C1)); \
  __m256 z = _mm256_mul_ps(                                                   \
      fx, *reinterpret_cast<const __m256*>(_ps256_cephes_exp_C2));            \
  x = _mm256_sub_ps(x, tmp);                                                  \
  x = _mm256_sub_ps(x, z);                                                    \
  z = _mm256_mul_ps(x, x);                                                    \
  __m256 y = *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p0);          \
  y = _mm256_mul_ps(y, x);                                                    \
  y = _mm256_add_ps(y,                                                        \
                    *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p1));  \
  y = _mm256_mul_ps(y, x);                                                    \
  y = _mm256_add_ps(y,                                                        \
                    *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p2));  \
  y = _mm256_mul_ps(y, x);                                                    \
  y = _mm256_add_ps(y,                                                        \
                    *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p3));  \
  y = _mm256_mul_ps(y, x);                                                    \
  y = _mm256_add_ps(y,                                                        \
                    *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p4));  \
  y = _mm256_mul_ps(y, x);                                                    \
  y = _mm256_add_ps(y,                                                        \
                    *reinterpret_cast<const __m256*>(_ps256_cephes_exp_p5));  \
  y = _mm256_mul_ps(y, z);                                                    \
  y = _mm256_add_ps(y, x);                                                    \
  y = _mm256_add_ps(y, one);                                                  \
  /* build 2^n */                                                             \
  imm0 = _mm256_cvttps_epi32(fx)
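
// AVX-only path: finish exp() by building 2^n with the SSE2-backed helpers,
// i.e. add the exponent bias 0x7f and shift it into the float exponent field.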
__m256 ExpAVX(__m256 x) {
  AVXEXP_BASE;
  // two AVX2 instructions using SSE2
  imm0 = avx2_mm256_add_epi32(imm0,
                              *reinterpret_cast<const __m256i*>(_pi256_0x7f));
  imm0 = avx2_mm256_slli_epi32(imm0, 23);
  __m256 pow2n = _mm256_castsi256_ps(imm0);
  y = _mm256_mul_ps(y, pow2n);
  return y;
}
#endif
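
// AVX2 path: same algorithm, but 2^n is built with native 256-bit integer
// add and shift instructions.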
#ifdef __AVX2__
__m256 ExpAVX2(__m256 x) {
  AVXEXP_BASE;
  // two AVX2 instructions
  imm0 = _mm256_add_epi32(imm0, *reinterpret_cast<const __m256i*>(_pi256_0x7f));
  imm0 = _mm256_slli_epi32(imm0, 23);
  __m256 pow2n = _mm256_castsi256_ps(imm0);
  y = _mm256_mul_ps(y, pow2n);
  return y;
}
#endif

}  // namespace detail
}  // namespace jitkernel
}  // namespace math
}  // namespace operators