|
|
|
@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
|
|
|
|
|
limitations under the License. */
|
|
|
|
|
|
|
|
|
|
#include "paddle/fluid/operators/math/jit_code.h"
|
|
|
|
|
#include "paddle/fluid/operators/math/jit_kernel.h"
|
|
|
|
|
#include "paddle/fluid/platform/cpu_info.h"
|
|
|
|
|
#include "paddle/fluid/operators/math/jit_kernel.h" // TODO(TJ): remove me
|
|
|
|
|
|
|
|
|
|
namespace paddle {
|
|
|
|
|
namespace operators {
|
|
|
|
@ -111,41 +110,7 @@ void VXXJitCode::generate() {
|
|
|
|
|
ret();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// 32-byte alignment for the constant tables below; the generated code loads
// them with aligned vector moves (vmovaps/vmovdqa), which require 32-byte
// aligned addresses.
#define ALIGN32 __attribute__((aligned(32)))

// Clamp bounds for exp(): beyond |x| ~= 88.37 a single-precision exp
// overflows/underflows.
#define EXP_HIG 88.3762626647949f
#define EXP_LOW (-88.3762626647949f)

// Cephes-style exp() constants: log2(e); ln(2) split into a high part (C1)
// and a small low-order correction (C2) for extra precision in the range
// reduction; and the degree-5 polynomial coefficients P0..P5.
#define CEPHES_LOG2EF 1.44269504088896341
#define CEPHES_EXP_C1 0.693359375
#define CEPHES_EXP_C2 (-2.12194440e-4)
#define CEPHES_EXP_P0 1.9875691500E-4
#define CEPHES_EXP_P1 1.3981999507E-3
#define CEPHES_EXP_P2 8.3334519073E-3
#define CEPHES_EXP_P3 4.1665795894E-2
#define CEPHES_EXP_P4 1.6666665459E-1
#define CEPHES_EXP_P5 5.0000001201E-1

// Expands to eight copies of val: one YMM-sized (8-float) table row.
#define REPEAT_8TIMES(val) val, val, val, val, val, val, val, val

// Byte offsets of each 8-float row inside the exp_float_consts table.
// Bodies are parenthesized so the macros remain correct when embedded in
// larger expressions (e.g. `base + OFFSET_EXP_C1`, loop bounds).
#define OFFSET_EXP_ONE (0 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_TWO (1 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_0P5 (2 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_HIG (3 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_LOW (4 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_LOG2EF (5 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_C1 (6 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_C2 (7 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P0 (8 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P1 (9 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P2 (10 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P3 (11 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P4 (12 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_P5 (13 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_EXP_MAX_INPUT (14 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_SIGMOID_MAX (15 * YMM_FLOAT_BLOCK * sizeof(float))
#define OFFSET_SIGMOID_MIN (16 * YMM_FLOAT_BLOCK * sizeof(float))
|
|
|
|
|
|
|
|
|
|
static const float exp_float_consts[] ALIGN32 = {
|
|
|
|
|
REPEAT_8TIMES(1.f),
|
|
|
|
|
const float exp_float_consts[] ALIGN32 = {REPEAT_8TIMES(1.f),
|
|
|
|
|
REPEAT_8TIMES(2.f),
|
|
|
|
|
REPEAT_8TIMES(0.5f),
|
|
|
|
|
REPEAT_8TIMES(EXP_HIG),
|
|
|
|
@ -163,8 +128,8 @@ static const float exp_float_consts[] ALIGN32 = {
|
|
|
|
|
REPEAT_8TIMES(SIGMOID_THRESHOLD_MAX),
|
|
|
|
|
REPEAT_8TIMES(SIGMOID_THRESHOLD_MIN)};
|
|
|
|
|
|
|
|
|
|
// NOTE(review): this span contained two conflicting copies of each
// definition (a `static` and a non-`static` one — a botched merge), which is
// a redefinition error. Keep a single copy each; the non-static form is
// retained so the symbols can also be referenced from outside this
// translation unit if needed.
//
// Eight copies of 0x7f (the IEEE-754 single-precision exponent bias, 127),
// used by exp_ymm/exp_xmm when reconstructing 2^n from the integer n.
const int exp_int_0x7f[] ALIGN32 = {REPEAT_8TIMES(0x7f)};
// Scratch memory for the AVX-only fallback in exp_ymm: two YMM registers
// (2 x 8 ints) are spilled here so the 256-bit integer add/shift can be done
// as two 128-bit halves. NOTE(review): a single global scratch buffer is not
// safe if the generated code runs on multiple threads concurrently — confirm
// the execution model before relying on it.
int g_tmp_mem[16] ALIGN32 = {0};
|
|
|
|
|
|
|
|
|
|
bool VActJitCode::init(int d, operand_type type) {
|
|
|
|
|
bool ok = MayIUse(avx);
|
|
|
|
@ -177,146 +142,6 @@ bool VActJitCode::init(int d, operand_type type) {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Emits AVX code computing ymm_dst = exp(ymm_src) elementwise over 8 floats,
// via Cephes-style range reduction: clamp x to [EXP_LOW, EXP_HIG], split
// exp(x) = 2^n * exp(g) with n = floor(x * log2(e) + 0.5), approximate
// exp(g) with a degree-5 polynomial, then rebuild 2^n by integer exponent
// manipulation. fx_idx/fy_idx/mask_idx/tmp_idx select four scratch YMM
// registers; they and ymm_src are clobbered. rax is used as the table
// pointer but saved/restored around the sequence.
void VActJitCode::exp_ymm(ymm_t& ymm_dst, ymm_t& ymm_src, int fx_idx,
                          int fy_idx, int mask_idx, int tmp_idx) {
  assert(ymm_src.getIdx() != ymm_dst.getIdx());  // TODO(TJ): use enforce
  // check all idx can not equal
  ymm_t ymm_fx = ymm_t(fx_idx);
  ymm_t ymm_fy = ymm_t(fy_idx);
  ymm_t ymm_mask = ymm_t(mask_idx);
  ymm_t ymm_tmp = ymm_t(tmp_idx);
  reg64_t reg_ptr_global = rax;
  push(reg_ptr_global);
  mov(reg_ptr_global, reinterpret_cast<size_t>(exp_float_consts));
  // Clamp the input into [EXP_LOW, EXP_HIG] to avoid float over/underflow.
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_HIG]);
  vminps(ymm_src, ymm_src, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOW]);
  vmaxps(ymm_src, ymm_src, ymm_tmp);
  // express exp(x) as exp(g + n*log(2))
  // fx = x * log2(e) + 0.5, then floor it (vroundps imm 0x01 = round down).
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOG2EF]);
  vmulps(ymm_fx, ymm_src, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_0P5]);
  vaddps(ymm_fx, ymm_fx, ymm_tmp);
  vroundps(ymm_fy, ymm_fx, 0x01);
  // if greater, subtract 1 (mask & 1.0 is taken from the table's first row).
  vcmpgtps(ymm_mask, ymm_fy, ymm_fx);
  vmovaps(ymm_tmp, ptr[reg_ptr_global]);
  vandps(ymm_mask, ymm_mask, ymm_tmp);
  vsubps(ymm_fx, ymm_fy, ymm_mask);
  // Range-reduce: g = x - n*C1 - n*C2, where C1 + C2 == ln(2) split in two
  // parts for precision.
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C1]);
  vmulps(ymm_fy, ymm_fx, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C2]);
  // ymm_mask is dead from here on, so reuse its register as ymm_z.
  ymm_t ymm_z = ymm_t(ymm_mask.getIdx());
  vmulps(ymm_z, ymm_fx, ymm_tmp);
  vsubps(ymm_src, ymm_src, ymm_fy);
  vsubps(ymm_src, ymm_src, ymm_z);
  vmulps(ymm_z, ymm_src, ymm_src);
  // Horner evaluation of the degree-5 polynomial: start with P0 and fold in
  // P1..P4 from consecutive table rows.
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P0]);
  vmulps(ymm_dst, ymm_src, ymm_tmp);
  for (size_t i = OFFSET_EXP_P1; i < OFFSET_EXP_P5;
       i += (YMM_FLOAT_BLOCK * sizeof(float))) {
    vmovaps(ymm_tmp, ptr[reg_ptr_global + i]);  // P1~P4
    vaddps(ymm_dst, ymm_dst, ymm_tmp);
    vmulps(ymm_dst, ymm_dst, ymm_src);
  }
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P5]);
  vaddps(ymm_dst, ymm_dst, ymm_tmp);
  vmulps(ymm_dst, ymm_dst, ymm_z);
  vaddps(ymm_dst, ymm_dst, ymm_src);
  // + 1.0 (first table row) to complete exp(g).
  vmovaps(ymm_tmp, ptr[reg_ptr_global]);
  vaddps(ymm_dst, ymm_dst, ymm_tmp);
  // build 2^n
  // Convert n to int32, add the exponent bias 0x7f, and shift it into the
  // float exponent field (bit 23).
  ymm_t ymm_int = ymm_fx;
  vcvttps2dq(ymm_int, ymm_fx);
  mov(reg_ptr_global, reinterpret_cast<size_t>(exp_int_0x7f));
  vmovdqa(ymm_tmp, ptr[reg_ptr_global]);
  if (MayIUse(avx2)) {
    // AVX2 has 256-bit integer add/shift directly.
    vpaddd(ymm_int, ymm_int, ymm_tmp);
    vpslld(ymm_int, ymm_int, 23);
  } else if (MayIUse(avx)) {
    // Plain AVX: spill both YMMs to g_tmp_mem and do the integer add/shift
    // as two 128-bit halves, then reload the combined result.
    xmm_t xtmp1 = xmm_t(ymm_int.getIdx());
    xmm_t xtmp2 = xmm_t(ymm_tmp.getIdx());
    reg64_t reg_ptr_tmp = reg_ptr_global;
    mov(reg_ptr_tmp, reinterpret_cast<size_t>(g_tmp_mem));
    vmovdqa(ptr[reg_ptr_tmp], ymm_int);
    vmovdqa(ptr[reg_ptr_tmp + YMM_FLOAT_BLOCK * sizeof(float)], ymm_tmp);
    vpaddd(xtmp1, xtmp1, xtmp2);
    vpslld(xtmp1, xtmp1, 23);
    vmovdqa(ptr[reg_ptr_tmp], xtmp1);
    // next 128bits
    vmovdqa(xtmp1, ptr[reg_ptr_tmp + 4 /*xmm float block*/ * sizeof(float)]);
    vmovdqa(xtmp2,
            ptr[reg_ptr_tmp +
                (YMM_FLOAT_BLOCK + 4 /*xmm float block*/) * sizeof(float)]);
    vpaddd(xtmp1, xtmp1, xtmp2);
    vpslld(xtmp1, xtmp1, 23);
    vmovdqa(ptr[reg_ptr_tmp + 4 /*xmm float block*/ * sizeof(float)], xtmp1);
    // load out
    vmovdqa(ymm_int, ptr[reg_ptr_tmp]);
  }
  // dst = exp(g) * 2^n.
  vmulps(ymm_dst, ymm_dst, ymm_int);
  pop(reg_ptr_global);
}
|
|
|
|
|
|
|
|
|
|
// 128-bit variant of exp_ymm: emits AVX code computing dst = exp(src)
// elementwise over 4 floats using the same Cephes-style range reduction and
// degree-5 polynomial. Local names keep the ymm_* prefix copied from
// exp_ymm, but every operand here is an XMM register. The scratch registers
// at fx_idx/fy_idx/mask_idx/tmp_idx and ymm_src are clobbered; rax is used
// as the table pointer but saved/restored. No AVX2 branch is needed: the
// 128-bit integer vpaddd/vpslld are available with plain AVX.
void VActJitCode::exp_xmm(xmm_t& ymm_dst, xmm_t& ymm_src, int fx_idx,
                          int fy_idx, int mask_idx, int tmp_idx) {
  assert(ymm_src.getIdx() != ymm_dst.getIdx());  // TODO(TJ): use enforce
  // check all idx can not equal
  xmm_t ymm_fx = xmm_t(fx_idx);
  xmm_t ymm_fy = xmm_t(fy_idx);
  xmm_t ymm_mask = xmm_t(mask_idx);
  xmm_t ymm_tmp = xmm_t(tmp_idx);
  reg64_t reg_ptr_global = rax;
  push(reg_ptr_global);
  mov(reg_ptr_global, reinterpret_cast<size_t>(exp_float_consts));
  // Clamp the input into [EXP_LOW, EXP_HIG] to avoid float over/underflow.
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_HIG]);
  vminps(ymm_src, ymm_src, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOW]);
  vmaxps(ymm_src, ymm_src, ymm_tmp);
  // express exp(x) as exp(g + n*log(2))
  // fx = x * log2(e) + 0.5, then floor it (vroundps imm 0x01 = round down).
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOG2EF]);
  vmulps(ymm_fx, ymm_src, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_0P5]);
  vaddps(ymm_fx, ymm_fx, ymm_tmp);
  vroundps(ymm_fy, ymm_fx, 0x01);
  // if greater, subtract 1 (mask & 1.0 taken from the table's first row).
  vcmpgtps(ymm_mask, ymm_fy, ymm_fx);
  vmovaps(ymm_tmp, ptr[reg_ptr_global]);
  vandps(ymm_mask, ymm_mask, ymm_tmp);
  vsubps(ymm_fx, ymm_fy, ymm_mask);
  // Range-reduce: g = x - n*C1 - n*C2 (C1 + C2 == ln(2), split for
  // precision).
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C1]);
  vmulps(ymm_fy, ymm_fx, ymm_tmp);
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C2]);
  // ymm_mask is dead from here on, so reuse its register as ymm_z.
  xmm_t ymm_z = xmm_t(ymm_mask.getIdx());
  vmulps(ymm_z, ymm_fx, ymm_tmp);
  vsubps(ymm_src, ymm_src, ymm_fy);
  vsubps(ymm_src, ymm_src, ymm_z);
  vmulps(ymm_z, ymm_src, ymm_src);
  // Horner evaluation of the degree-5 polynomial. The loop stride is still
  // YMM_FLOAT_BLOCK because the constant table is laid out in 8-float rows;
  // only the low 4 floats of each row are loaded here.
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P0]);
  vmulps(ymm_dst, ymm_src, ymm_tmp);
  for (size_t i = OFFSET_EXP_P1; i < OFFSET_EXP_P5;
       i += (YMM_FLOAT_BLOCK * sizeof(float))) {
    vmovaps(ymm_tmp, ptr[reg_ptr_global + i]);  // P1~P4
    vaddps(ymm_dst, ymm_dst, ymm_tmp);
    vmulps(ymm_dst, ymm_dst, ymm_src);
  }
  vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P5]);
  vaddps(ymm_dst, ymm_dst, ymm_tmp);
  vmulps(ymm_dst, ymm_dst, ymm_z);
  vaddps(ymm_dst, ymm_dst, ymm_src);
  // + 1.0 (first table row) to complete exp(g).
  vmovaps(ymm_tmp, ptr[reg_ptr_global]);
  vaddps(ymm_dst, ymm_dst, ymm_tmp);
  // build 2^n
  // Convert n to int32, add the exponent bias 0x7f, shift into the float
  // exponent field (bit 23), then dst = exp(g) * 2^n.
  xmm_t ymm_int = ymm_fx;
  vcvttps2dq(ymm_int, ymm_fx);
  mov(reg_ptr_global, reinterpret_cast<size_t>(exp_int_0x7f));
  vmovdqa(ymm_tmp, ptr[reg_ptr_global]);
  vpaddd(ymm_int, ymm_int, ymm_tmp);
  vpslld(ymm_int, ymm_int, 23);
  vmulps(ymm_dst, ymm_dst, ymm_int);
  pop(reg_ptr_global);
}
|
|
|
|
|
|
|
|
|
|
void VActJitCode::sigmoid_ymm(ymm_t& ymm_dst, ymm_t& ymm_src, int fx_idx,
|
|
|
|
|
int fy_idx, int mask_idx, int tmp_idx) {
|
|
|
|
|
// y = 1 / (1 + e^-x)
|
|
|
|
@ -330,7 +155,7 @@ void VActJitCode::sigmoid_ymm(ymm_t& ymm_dst, ymm_t& ymm_src, int fx_idx,
|
|
|
|
|
vmaxps(ymm_src, ymm_src, ymm_tmp);
|
|
|
|
|
vxorps(ymm_tmp, ymm_tmp, ymm_tmp);
|
|
|
|
|
vsubps(ymm_src, ymm_tmp, ymm_src);
|
|
|
|
|
exp_ymm(ymm_dst, ymm_src, fx_idx, fy_idx, mask_idx, tmp_idx);
|
|
|
|
|
exp_jmm<ymm_t>(ymm_dst, ymm_src, fx_idx, fy_idx, mask_idx, tmp_idx);
|
|
|
|
|
vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]);
|
|
|
|
|
vaddps(ymm_dst, ymm_dst, ymm_tmp);
|
|
|
|
|
vdivps(ymm_dst, ymm_tmp, ymm_dst);
|
|
|
|
@ -349,7 +174,7 @@ void VActJitCode::tanh_ymm(ymm_t& ymm_dst, ymm_t& ymm_src, int fx_idx,
|
|
|
|
|
vxorps(ymm_zero, ymm_zero, ymm_zero);
|
|
|
|
|
vsubps(ymm_tmp, ymm_zero, ymm_tmp);
|
|
|
|
|
vmulps(ymm_src, ymm_src, ymm_tmp);
|
|
|
|
|
exp_ymm(ymm_dst, ymm_src, fx_idx, fy_idx, mask_idx, tmp_idx);
|
|
|
|
|
exp_jmm<ymm_t>(ymm_dst, ymm_src, fx_idx, fy_idx, mask_idx, tmp_idx);
|
|
|
|
|
vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]);
|
|
|
|
|
vaddps(ymm_dst, ymm_dst, ymm_tmp);
|
|
|
|
|
vmovaps(ymm_tmp, ptr[reg_ptr_global + OFFSET_EXP_TWO]);
|
|
|
|
@ -373,7 +198,7 @@ void VActJitCode::generate() {
|
|
|
|
|
relu_jmm<ymm_t>(ymm_dst, ymm_src, ymm_zero);
|
|
|
|
|
break;
|
|
|
|
|
case operand_type::exp:
|
|
|
|
|
exp_ymm(ymm_dst, ymm_src, 2, 3, 4, 5);
|
|
|
|
|
exp_jmm<ymm_t>(ymm_dst, ymm_src, 2, 3, 4, 5);
|
|
|
|
|
break;
|
|
|
|
|
case operand_type::sigmoid:
|
|
|
|
|
sigmoid_ymm(ymm_dst, ymm_src, 2, 3, 4, 5);
|
|
|
|
@ -409,7 +234,7 @@ void VActJitCode::generate() {
|
|
|
|
|
relu_jmm<xmm_t>(xmm_dst, xmm_src, xmm_zero);
|
|
|
|
|
break;
|
|
|
|
|
case operand_type::exp:
|
|
|
|
|
exp_xmm(xmm_dst, xmm_src, 2, 3, 4, 5);
|
|
|
|
|
exp_jmm<xmm_t>(xmm_dst, xmm_src, 2, 3, 4, 5);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|