|
|
|
@@ -49,6 +49,46 @@ void relu(const float* a, float* b, int len) {
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// b[i] = a[i] > 0.0f ? a[i] : a[i] * w
|
|
|
|
|
void prelu(const float* a, float w, float* b, int len) {
|
|
|
|
|
int offset = len % 16;
|
|
|
|
|
float32x4_t ma0, ma1, ma2, ma3;
|
|
|
|
|
|
|
|
|
|
float32x4_t zero = vdupq_n_f32(0.f);
|
|
|
|
|
float32x4_t vw = vdupq_n_f32(w);
|
|
|
|
|
|
|
|
|
|
for (int k = 0; k < len / 16; k++, a += 16, b += 16) {
|
|
|
|
|
ma0 = vld1q_f32(a);
|
|
|
|
|
ma1 = vld1q_f32(a + 4);
|
|
|
|
|
ma2 = vld1q_f32(a + 8);
|
|
|
|
|
ma3 = vld1q_f32(a + 12);
|
|
|
|
|
|
|
|
|
|
uint32x4_t flag0 = vcgtq_f32(ma0, zero);
|
|
|
|
|
uint32x4_t flag1 = vcgtq_f32(ma1, zero);
|
|
|
|
|
uint32x4_t flag2 = vcgtq_f32(ma2, zero);
|
|
|
|
|
uint32x4_t flag3 = vcgtq_f32(ma3, zero);
|
|
|
|
|
|
|
|
|
|
float32x4_t mul0 = vmulq_f32(ma0, vw);
|
|
|
|
|
float32x4_t mul1 = vmulq_f32(ma1, vw);
|
|
|
|
|
float32x4_t mul2 = vmulq_f32(ma2, vw);
|
|
|
|
|
float32x4_t mul3 = vmulq_f32(ma3, vw);
|
|
|
|
|
|
|
|
|
|
ma0 = vbslq_f32(flag0, ma0, mul0);
|
|
|
|
|
ma1 = vbslq_f32(flag1, ma1, mul1);
|
|
|
|
|
ma2 = vbslq_f32(flag2, ma2, mul2);
|
|
|
|
|
ma3 = vbslq_f32(flag3, ma3, mul3);
|
|
|
|
|
|
|
|
|
|
vst1q_f32(b, ma0);
|
|
|
|
|
vst1q_f32(b + 4, ma1);
|
|
|
|
|
vst1q_f32(b + 8, ma2);
|
|
|
|
|
vst1q_f32(b + 12, ma3);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < offset; i++) {
|
|
|
|
|
b[i] = a[i] > 0.0f ? a[i] : a[i] * w;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} // namespace neon
|
|
|
|
|
} // namespace paddle
|
|
|
|
|
|
|
|
|
|