diff --git a/mindspore/lite/nnacl/fp16/activation_fp16.c b/mindspore/lite/nnacl/fp16/activation_fp16.c
index 7640cc302a..33060ed46f 100644
--- a/mindspore/lite/nnacl/fp16/activation_fp16.c
+++ b/mindspore/lite/nnacl/fp16/activation_fp16.c
@@ -66,7 +66,7 @@ int SigmoidFp16(const float16_t *src, float16_t *dst, int ele_num) {
   return NNACL_OK;
 }
 
-float16_t TanhOpt(float16_t src) {
+float16_t TanhOptFp16(float16_t src) {
   if (src > 5.0) {
     return 1.0f;
   } else if (src < -5.0) {
@@ -81,7 +81,7 @@ float16_t TanhOpt(float16_t src) {
 
 int TanhFp16(const float16_t *src, float16_t *dst, int ele_num) {
   for (int i = 0; i < ele_num; ++i) {
-    dst[i] = TanhOpt(src[i]);
+    dst[i] = TanhOptFp16(src[i]);
   }
   return NNACL_OK;
 }
diff --git a/mindspore/lite/nnacl/fp16/crop_fp16.c b/mindspore/lite/nnacl/fp16/crop_fp16.c
index 4df3399162..b44d6b9bb6 100644
--- a/mindspore/lite/nnacl/fp16/crop_fp16.c
+++ b/mindspore/lite/nnacl/fp16/crop_fp16.c
@@ -20,25 +20,25 @@
 
 #include "nnacl/crop_parameter.h"
 
-void Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
+void Fp16Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
   int input_dim = para->input_dim_;
   switch (input_dim) {
     case 1:
-      Crop1D(input, output, task_id, para);
+      Fp16Crop1D(input, output, task_id, para);
       break;
     case 2:
-      Crop2D(input, output, task_id, para);
+      Fp16Crop2D(input, output, task_id, para);
       break;
     case 3:
-      Crop3D(input, output, task_id, para);
+      Fp16Crop3D(input, output, task_id, para);
       break;
     case 4:
-      Crop4D(input, output, task_id, para);
+      Fp16Crop4D(input, output, task_id, para);
       break;
   }
 }
 
-void Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
+void Fp16Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
   const int out_batch = para->out_shape_[0];
   const int thread_count = para->thread_count_;
   int64_t task_id_stride = thread_count > 1 ? UP_DIV(out_batch, thread_count) : out_batch;
@@ -55,7 +55,7 @@ void Crop1D(const float16_t *input, float16_t *output, int task_id, CropParamete
   memcpy(out_ptr, in_ptr, sizeof(float16_t) * out_dist_stride);
 }
 
-void Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
+void Fp16Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
   const int in_height = para->in_shape_[1];
   const int out_batch = para->out_shape_[0];
   const int out_height = para->out_shape_[1];
@@ -77,7 +77,7 @@ void Crop2D(const float16_t *input, float16_t *output, int task_id, CropParamete
   }
 }
 
-void Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
+void Fp16Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
   const int in_height = para->in_shape_[1];
   const int in_width = para->in_shape_[2];
 
@@ -111,7 +111,7 @@ void Crop3D(const float16_t *input, float16_t *output, int task_id, CropParamete
   }
 }
 
-void Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
+void Fp16Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para) {
   const int in_height = para->in_shape_[1];
   const int in_width = para->in_shape_[2];
   const int in_channel = para->in_shape_[3];
diff --git a/mindspore/lite/nnacl/fp16/crop_fp16.h b/mindspore/lite/nnacl/fp16/crop_fp16.h
index a0f723d581..18530b674b 100644
--- a/mindspore/lite/nnacl/fp16/crop_fp16.h
+++ b/mindspore/lite/nnacl/fp16/crop_fp16.h
@@ -24,11 +24,11 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-void Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
-void Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
-void Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
-void Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
-void Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
+void Fp16Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
+void Fp16Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
+void Fp16Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
+void Fp16Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
+void Fp16Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para);
 #ifdef __cplusplus
 }
 #endif
diff --git a/mindspore/lite/nnacl/fp16/scale_fp16.c b/mindspore/lite/nnacl/fp16/scale_fp16.c
index 1b96499484..94d5560c2a 100644
--- a/mindspore/lite/nnacl/fp16/scale_fp16.c
+++ b/mindspore/lite/nnacl/fp16/scale_fp16.c
@@ -16,8 +16,8 @@
 
 #include "nnacl/fp16/scale_fp16.h"
 
-void ScaleInner(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-                int outer_end, int axis_size, int inner_size) {
+void Fp16ScaleInner(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                    int outer_end, int axis_size, int inner_size) {
   for (int out = outer_start; out < outer_end; out++) {
     int out_offset = out * axis_size * inner_size;
     for (int i = 0; i < axis_size; i++) {
@@ -42,8 +42,8 @@ void ScaleInner(float16_t *in_data, float16_t *out_data, float16_t *scale, float
   }
 }
 
-void ScaleAxis(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-               int outer_end, int axis_size) {
+void Fp16ScaleAxis(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                   int outer_end, int axis_size) {
   for (int out = outer_start; out < outer_end; out++) {
     int out_offset = out * axis_size;
     int index = 0;
@@ -71,15 +71,15 @@ void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, floa
   int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
 
   if (scale_param->inner_size_ == 1) {
-    ScaleAxis(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
+    Fp16ScaleAxis(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
   } else {
-    ScaleInner(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
-               scale_param->inner_size_);
+    Fp16ScaleInner(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
+                   scale_param->inner_size_);
   }
 }
 
-void ScaleInnerRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-                    int outer_end, int axis_size, int inner_size) {
+void Fp16ScaleInnerRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                        int outer_end, int axis_size, int inner_size) {
 #ifdef ENABLE_ARM64
   float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0};
 #endif
@@ -108,8 +108,8 @@ void ScaleInnerRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, f
   }
 }
 
-void ScaleAxisRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-                   int outer_end, int axis_size) {
+void Fp16ScaleAxisRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                       int outer_end, int axis_size) {
 #ifdef ENABLE_ARM64
   float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0};
 #endif
@@ -135,22 +135,22 @@ void ScaleAxisRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, fl
   }
 }
 
-void DoScaleReluFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
+void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
                      ScaleParameter *scale_param) {
   int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
   int outer_start = task_id * outer_step;
   int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
   if (scale_param->inner_size_ == 1) {
-    ScaleAxisRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
+    Fp16ScaleAxisRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
   } else {
-    ScaleInnerRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
-                   scale_param->inner_size_);
+    Fp16ScaleInnerRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
+                       scale_param->inner_size_);
   }
 }
 
-void ScaleInnerRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-                     int outer_end, int axis_size, int inner_size) {
+void Fp16ScaleInnerRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                         int outer_end, int axis_size, int inner_size) {
 #ifdef ENABLE_ARM64
   float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0};
   float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6};
 #endif
@@ -180,8 +180,8 @@ void ScaleInnerRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale,
   }
 }
 
-void ScaleAxisRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
-                    int outer_end, int axis_size) {
+void Fp16ScaleAxisRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start,
+                        int outer_end, int axis_size) {
 #ifdef ENABLE_ARM64
   float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0};
   float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6};
 #endif
@@ -215,9 +215,9 @@ void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale,
   int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
 
   if (scale_param->inner_size_ == 1) {
-    ScaleAxisRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
+    Fp16ScaleAxisRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_);
   } else {
-    ScaleInnerRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
-                    scale_param->inner_size_);
+    Fp16ScaleInnerRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
+                        scale_param->inner_size_);
   }
 }
diff --git a/mindspore/lite/nnacl/fp16/scale_fp16.h b/mindspore/lite/nnacl/fp16/scale_fp16.h
index 036d02a7e7..6a391c495b 100644
--- a/mindspore/lite/nnacl/fp16/scale_fp16.h
+++ b/mindspore/lite/nnacl/fp16/scale_fp16.h
@@ -27,7 +27,7 @@ extern "C" {
 #endif
 void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
                  ScaleParameter *scale_param);
-void DoScaleReluFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
+void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
                      ScaleParameter *scale_param);
 void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id,
                       ScaleParameter *scale_param);
diff --git a/mindspore/lite/nnacl/fp16/stack_fp16.c b/mindspore/lite/nnacl/fp16/stack_fp16.c
index 1a282d91dd..122657d559 100644
--- a/mindspore/lite/nnacl/fp16/stack_fp16.c
+++ b/mindspore/lite/nnacl/fp16/stack_fp16.c
@@ -17,7 +17,7 @@
 #include "nnacl/fp16/stack_fp16.h"
 #include "nnacl/arithmetic_common.h"
 
-size_t GetStackCopyNum(int axis, int *in_shape, size_t shape_size) {
+size_t Fp16GetStackCopyNum(int axis, int *in_shape, size_t shape_size) {
   size_t one_input_size = 1;
   for (size_t i = 0; i < shape_size; ++i) {
     one_input_size *= in_shape[i];
@@ -29,7 +29,7 @@ size_t GetStackCopyNum(int axis, int *in_shape, size_t shape_size) {
   return copy_num;
 }
 
-size_t GetStackPreAxisCount(const int *in_shape, int axis) {
+size_t Fp16GetStackPreAxisCount2(const int *in_shape, int axis) {
   size_t pre_axis_count = 1;
   for (size_t i = 0; i < axis; ++i) {
     pre_axis_count *= in_shape[i];
@@ -39,9 +39,9 @@
 
 void DoStackFp16(const float16_t *const *inputs, size_t input_num, int *in_shape, size_t shape_size, int axis,
                  float16_t *output) {
-  size_t copy_num = GetStackCopyNum(axis, in_shape, shape_size);
+  size_t copy_num = Fp16GetStackCopyNum(axis, in_shape, shape_size);
   size_t copy_size = copy_num * sizeof(float16_t);
-  size_t pre_axis_count = GetStackPreAxisCount(in_shape, axis);
+  size_t pre_axis_count = Fp16GetStackPreAxisCount2(in_shape, axis);
   size_t in_offset = 0;
   size_t out_offset = 0;
   for (size_t i = 0; i < pre_axis_count; ++i) {
diff --git a/mindspore/lite/nnacl/fp16/transpose_fp16.c b/mindspore/lite/nnacl/fp16/transpose_fp16.c
index d43dca1000..dfcad94ac7 100644
--- a/mindspore/lite/nnacl/fp16/transpose_fp16.c
+++ b/mindspore/lite/nnacl/fp16/transpose_fp16.c
@@ -18,8 +18,8 @@
 #include <string.h>
 #include "nnacl/errorcode.h"
 
-void TransposeDim2(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
-                   int *output_shape, int h_start, int h_end) {
+void Fp16TransposeDim2(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
+                       int *output_shape, int h_start, int h_end) {
   const int stride0 = strides[perm[0]];
   const int stride1 = strides[perm[1]];
   const int output0 = output_shape[0];
@@ -33,8 +33,8 @@ void TransposeDim2(float16_t *in_data, float16_t *out_data, int *strides, int *o
   }
 }
 
-void TransposeDim3(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
-                   int *output_shape, int h_start, int h_end) {
+void Fp16TransposeDim3(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
+                       int *output_shape, int h_start, int h_end) {
   const int stride0 = strides[perm[0]];
   const int stride1 = strides[perm[1]];
   const int stride2 = strides[perm[2]];
@@ -56,8 +56,8 @@ void TransposeDim3(float16_t *in_data, float16_t *out_data, int *strides, int *o
   }
 }
 
-void TransposeDim4(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
-                   int *output_shape, int h_start, int h_end) {
+void Fp16TransposeDim4(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
+                       int *output_shape, int h_start, int h_end) {
   const int stride0 = strides[perm[0]];
   const int stride1 = strides[perm[1]];
   const int stride2 = strides[perm[2]];
@@ -88,8 +88,8 @@ void TransposeDim4(float16_t *in_data, float16_t *out_data, int *strides, int *o
   }
 }
 
-void TransposeDim5(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
-                   int *output_shape, int h_start, int h_end) {
+void Fp16TransposeDim5(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
+                       int *output_shape, int h_start, int h_end) {
   const int stride0 = strides[perm[0]];
   const int stride1 = strides[perm[1]];
   const int stride2 = strides[perm[2]];
@@ -127,8 +127,8 @@ void TransposeDim5(float16_t *in_data, float16_t *out_data, int *strides, int *o
   }
 }
 
-int DoTranspose(float16_t *in_data, float16_t *out_data, int *input_shape, int *output_shape,
-                TransposeParameter *transpose_param, int h_start, int h_end) {
+int Fp16DoTranspose(float16_t *in_data, float16_t *out_data, int *input_shape, int *output_shape,
+                    TransposeParameter *transpose_param, int h_start, int h_end) {
   if (in_data == NULL || out_data == NULL) {
     return NNACL_ERR;
   }
@@ -156,13 +156,13 @@ int DoTranspose(float16_t *in_data, float16_t *out_data, int *input_shape, int *
     return NNACL_OK;
   }
   if (num_axes == 2) {
-    TransposeDim2(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
+    Fp16TransposeDim2(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
   } else if (num_axes == 3) {
-    TransposeDim3(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
+    Fp16TransposeDim3(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
   } else if (num_axes == 4) {
-    TransposeDim4(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
+    Fp16TransposeDim4(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
   } else if (num_axes == 5) {
-    TransposeDim5(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
+    Fp16TransposeDim5(in_data, out_data, strides, out_strides, perm, output_shape, h_start, h_end);
   }
   return NNACL_OK;
 }
diff --git a/mindspore/lite/nnacl/fp16/transpose_fp16.h b/mindspore/lite/nnacl/fp16/transpose_fp16.h
index 16975c8c33..1c84c0e6bd 100644
--- a/mindspore/lite/nnacl/fp16/transpose_fp16.h
+++ b/mindspore/lite/nnacl/fp16/transpose_fp16.h
@@ -35,8 +35,8 @@ typedef struct TransposeParameter {
 #ifdef __cplusplus
 extern "C" {
 #endif
-int DoTranspose(float16_t *in_data, float16_t *out_data, int *input_shape, int *output_shape,
-                TransposeParameter *transpose_param, int h_start, int h_end);
+int Fp16DoTranspose(float16_t *in_data, float16_t *out_data, int *input_shape, int *output_shape,
+                    TransposeParameter *transpose_param, int h_start, int h_end);
 void TransposeDim2(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
                    int *output_shape, int h_start, int h_end);
 void TransposeDim3(float16_t *in_data, float16_t *out_data, int *strides, int *out_strides, int *perm,
diff --git a/mindspore/lite/nnacl/int8/crop_int8.c b/mindspore/lite/nnacl/int8/crop_int8.c
index fadc5319c1..5e42735faf 100644
--- a/mindspore/lite/nnacl/int8/crop_int8.c
+++ b/mindspore/lite/nnacl/int8/crop_int8.c
@@ -18,17 +18,17 @@
 #include "nnacl/int8/crop_int8.h"
 #include <string.h>
 
-void Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
+void Int8Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
   int input_dim = para->input_dim_;
   switch (input_dim) {
     case 1:
-      Crop1D(input, output, task_id, para);
+      Int8Crop1D(input, output, task_id, para);
       break;
     case 2:
-      Crop2D(input, output, task_id, para);
+      Int8Crop2D(input, output, task_id, para);
       break;
    case 3:
-      Crop3D(input, output, task_id, para);
+      Int8Crop3D(input, output, task_id, para);
       break;
     case 4:
       Int8Crop4D(input, output, task_id, para);
@@ -36,7 +36,7 @@ void Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para)
   }
 }
 
-void Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
+void Int8Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
   const int out_batch = para->out_shape_[0];
   const int thread_count = para->thread_count_;
   int64_t task_id_stride = thread_count > 1 ? UP_DIV(out_batch, thread_count) : out_batch;
@@ -75,7 +75,7 @@ void Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *par
   return;
 }
 
-void Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
+void Int8Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
   const int in_height = para->in_shape_[1];
   const int out_batch = para->out_shape_[0];
   const int out_height = para->out_shape_[1];
@@ -118,7 +118,7 @@ void Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *par
   return;
 }
 
-void Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
+void Int8Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para) {
   const int in_height = para->in_shape_[1];
   const int in_width = para->in_shape_[2];
diff --git a/mindspore/lite/nnacl/int8/crop_int8.h b/mindspore/lite/nnacl/int8/crop_int8.h
index 45f50b3a4b..77c2adef40 100644
--- a/mindspore/lite/nnacl/int8/crop_int8.h
+++ b/mindspore/lite/nnacl/int8/crop_int8.h
@@ -22,10 +22,10 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-void Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
-void Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
-void Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
-void Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
+void Int8Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
+void Int8Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
+void Int8Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
+void Int8Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
 void Int8Crop4D(const int8_t *input, int8_t *output, int task_id, CropParameter *para);
 #ifdef __cplusplus
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc
index 7e49cf7515..85435efe51 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc
@@ -46,7 +46,7 @@ int CropFp16CPUKernel::Init() {
 
 int CropFp16CPUKernel::ReSize() { return CropBaseCPUKernel::ReSize(); }
 
 int CropFp16CPUKernel::DoExecute(int task_id) {
-  Crop(input_ptr_, output_ptr_, task_id, crop_para_);
+  Fp16Crop(input_ptr_, output_ptr_, task_id, crop_para_);
   return RET_OK;
 }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc
index 5bd755a6fe..f6458eb29e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc
@@ -80,7 +80,7 @@ int ScaleFp16CPUKernel::Scale(int task_id) {
       DoScaleRelu6Fp16(input_, output_, scale_, offset_, task_id, scale_param_);
       break;
    case schema::ActivationType_RELU:
-      DoScaleReluFp16(input_, output_, scale_, offset_, task_id, scale_param_);
+      Fp16DoScaleRelu(input_, output_, scale_, offset_, task_id, scale_param_);
      break;
    case schema::ActivationType_NO_ACTIVATION:
      DoScaleFp16(input_, output_, scale_, offset_, task_id, scale_param_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
index 5a96c9a335..f7a5dbc427 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.cc
@@ -107,8 +107,8 @@ int TransposeFp16CPUKernel::TransposeParallel(int task_id) {
   int thread_offset = task_id * thread_h_stride_;
   TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
 
-  auto ret = DoTranspose(fp16_in_data_, fp16_out_data_, in_shape_, out_shape_, param, thread_offset,
-                         thread_offset + num_unit_thread);
+  auto ret = Fp16DoTranspose(fp16_in_data_, fp16_out_data_, in_shape_, out_shape_, param, thread_offset,
+                             thread_offset + num_unit_thread);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Transpose error task_id[" << task_id << "] error_code[" << ret << "]";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
index 663353f2ff..6362e7c69d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
@@ -84,7 +84,7 @@ int CropInt8CPUKernel::DoExecute(int task_id) {
   auto out_tensor = out_tensors_.at(kOutputIndex);
   int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->MutableData());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->MutableData());
-  Crop(input_data, output_data, task_id, crop_para_);
+  Int8Crop(input_data, output_data, task_id, crop_para_);
   return RET_OK;
 }