diff --git a/mindspore/lite/internal/src/kernel/fp32/arithmetic.cc b/mindspore/lite/internal/src/kernel/fp32/arithmetic.cc index db0ae0f795..91a07900a2 100644 --- a/mindspore/lite/internal/src/kernel/fp32/arithmetic.cc +++ b/mindspore/lite/internal/src/kernel/fp32/arithmetic.cc @@ -22,9 +22,9 @@ #include "nnacl/arithmetic_common.h" #include "nnacl/fp32/arithmetic.h" -typedef int (*ArithmeticRun)(float *input0, float *input1, float *output, int element_size); -typedef int (*ArithmeticOptRun)(float *input0, float *input1, float *output, int element_size, - ArithmeticParameter *param); +typedef int (*ArithmeticRun)(const float *input0, const float *input1, float *output, const int element_size); +typedef int (*ArithmeticOptRun)(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); int BroadcastRun(float *input0, float *input1, float *output, int dim, int out_count, int break_pos, ArithmeticRun arithmetic_run, ArithmeticParameter *params) { diff --git a/mindspore/lite/nnacl/arithmetic_common.c b/mindspore/lite/nnacl/arithmetic_common.c index 1f59da8bed..267238aeae 100644 --- a/mindspore/lite/nnacl/arithmetic_common.c +++ b/mindspore/lite/nnacl/arithmetic_common.c @@ -52,7 +52,7 @@ void TileOneDimensionUint8(uint8_t *inData, uint8_t *outData, int dim, size_t nd } } -void ComputeStrides(int *shape, int *strides, int ndim) { +void ComputeStrides(const int *shape, int *strides, const int ndim) { int stride = 1; for (int i = ndim - 1; i >= 0; i--) { strides[i] = stride; diff --git a/mindspore/lite/nnacl/arithmetic_common.h b/mindspore/lite/nnacl/arithmetic_common.h index c4f85355fa..744a6a797e 100644 --- a/mindspore/lite/nnacl/arithmetic_common.h +++ b/mindspore/lite/nnacl/arithmetic_common.h @@ -49,7 +49,7 @@ extern "C" { #endif void TileOneDimension(float *inData, float *outData, int dim, size_t ndim, int *inShape, int *inStrides, int *outStrides, int *multiple); -void ComputeStrides(int *shape, int *strides, int ndim); +void ComputeStrides(const int *shape, int *strides, const int ndim); void CalcMultiplesAndStrides(ArithmeticParameter *param); diff --git a/mindspore/lite/nnacl/depth_to_space.c b/mindspore/lite/nnacl/depth_to_space.c index 09e5e0b39f..a41afb9015 100644 --- a/mindspore/lite/nnacl/depth_to_space.c +++ b/mindspore/lite/nnacl/depth_to_space.c @@ -16,7 +16,7 @@ #include "nnacl/depth_to_space.h" #include <string.h> -void DepthToSpaceForNHWC(const void *input, void *output, int *in_shape, DepthToSpaceParameter *param) { +void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, const DepthToSpaceParameter *param) { int32_t block_size = param->block_size_; int32_t in_shape_dim2 = in_shape[2]; int32_t in_shape_dim1 = in_shape[1]; diff --git a/mindspore/lite/nnacl/depth_to_space.h b/mindspore/lite/nnacl/depth_to_space.h index 9b22b24d1f..25f2bf622c 100644 --- a/mindspore/lite/nnacl/depth_to_space.h +++ b/mindspore/lite/nnacl/depth_to_space.h @@ -20,7 +20,7 @@ #ifdef __cplusplus extern "C" { #endif -void DepthToSpaceForNHWC(const void *input, void *output, int *in_shape, DepthToSpaceParameter *param); +void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, const DepthToSpaceParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/arithmetic.c b/mindspore/lite/nnacl/fp32/arithmetic.c index 3d8ec7fdcc..ffa799ef82 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic.c +++ b/mindspore/lite/nnacl/fp32/arithmetic.c @@ -19,7 +19,8 @@ #define ACCURACY_DATA 0.00000001 -int
ElementOptMul(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptMul(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -51,7 +52,8 @@ int ElementOptMul(float *input0, float *input1, float *output, int element_size, return NNACL_OK; } -int ElementOptMulRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptMulRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -84,7 +86,8 @@ int ElementOptMulRelu(float *input0, float *input1, float *output, int element_s return NNACL_OK; } -int ElementOptMulRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptMulRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -118,7 +121,8 @@ int ElementOptMulRelu6(float *input0, float *input1, float *output, int element_ return NNACL_OK; } -int ElementOptMulInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param) { +int ElementOptMulInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON int32x4_t vin0_opt = vdupq_n_s32(input0[0]); int32x4_t vin1_opt = vdupq_n_s32(input1[0]); @@ -150,7 +154,8 @@ int ElementOptMulInt(int *input0, int *input1, int *output, int element_size, Ar return NNACL_OK; } -int ElementOptMulReluInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param) { +int ElementOptMulReluInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON int32x4_t vin0_opt = vdupq_n_s32(input0[0]); int32x4_t vin1_opt = vdupq_n_s32(input1[0]); @@ -183,7 +188,8 @@ int ElementOptMulReluInt(int *input0, int *input1, int *output, int element_size return NNACL_OK; } -int ElementOptMulRelu6Int(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param) { +int ElementOptMulRelu6Int(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON int32x4_t vin0_opt = vdupq_n_s32(input0[0]); int32x4_t vin1_opt = vdupq_n_s32(input1[0]); @@ -217,7 +223,8 @@ int ElementOptMulRelu6Int(int *input0, int *input1, int *output, int element_siz return NNACL_OK; } -int ElementOptSub(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptSub(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -249,7 +256,8 @@ int ElementOptSub(float *input0, float *input1, float *output, int element_size, return NNACL_OK; } -int ElementOptSubRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptSubRelu(const float *input0, const float 
*input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -282,7 +290,8 @@ int ElementOptSubRelu(float *input0, float *input1, float *output, int element_s return NNACL_OK; } -int ElementOptSubRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptSubRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -316,7 +325,8 @@ int ElementOptSubRelu6(float *input0, float *input1, float *output, int element_ return NNACL_OK; } -int ElementOptAdd(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptAdd(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -348,7 +358,8 @@ int ElementOptAdd(float *input0, float *input1, float *output, int element_size, return NNACL_OK; } -int ElementOptAddInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param) { +int ElementOptAddInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON int32x4_t vin0_opt = vdupq_n_s32(input0[0]); int32x4_t vin1_opt = vdupq_n_s32(input1[0]); @@ -380,7 +391,8 @@ int ElementOptAddInt(int *input0, int *input1, int *output, int element_size, Ar return NNACL_OK; } -int ElementOptAddRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptAddRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -413,7 +425,8 @@ int ElementOptAddRelu(float *input0, float *input1, float *output, int element_s return NNACL_OK; } -int ElementOptAddRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptAddRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { #ifdef ENABLE_NEON float32x4_t vin0_opt = vdupq_n_f32(input0[0]); float32x4_t vin1_opt = vdupq_n_f32(input1[0]); @@ -448,7 +461,8 @@ int ElementOptAddRelu6(float *input0, float *input1, float *output, int element_ return NNACL_OK; } -int ElementOptDiv(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptDiv(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { if (param->in_elements_num0_ == 1) { for (int index = 0; index < element_size; index++) { output[index] = input0[0] / input1[index]; @@ -464,7 +478,8 @@ int ElementOptDiv(float *input0, float *input1, float *output, int element_size, return NNACL_OK; } -int ElementOptDivRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptDivRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { if (param->in_elements_num0_ == 1) { for 
(int index = 0; index < element_size; index++) { output[index] = input0[0] / input1[index]; @@ -479,7 +494,8 @@ int ElementOptDivRelu(float *input0, float *input1, float *output, int element_s return NNACL_OK; } -int ElementOptDivRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param) { +int ElementOptDivRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param) { if (param->in_elements_num0_ == 1) { for (int index = 0; index < element_size; index++) { output[index] = MSMIN(MSMAX(input0[0] / input1[index], 0), 6); @@ -492,7 +508,7 @@ int ElementOptDivRelu6(float *input0, float *input1, float *output, int element_ return NNACL_OK; } -int ElementMul(float *input0, float *input1, float *output, int element_size) { +int ElementMul(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -508,7 +524,7 @@ int ElementMul(float *input0, float *input1, float *output, int element_size) { return NNACL_OK; } -int ElementMulRelu(float *input0, float *input1, float *output, int element_size) { +int ElementMulRelu(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -527,7 +543,7 @@ int ElementMulRelu(float *input0, float *input1, float *output, int element_size return NNACL_OK; } -int ElementMulRelu6(float *input0, float *input1, float *output, int element_size) { +int ElementMulRelu6(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -545,7 +561,7 @@ int ElementMulRelu6(float *input0, float *input1, float *output, int element_siz return NNACL_OK; } -int ElementMulInt(int *input0, int *input1, int *output, int element_size) { +int ElementMulInt(const int *input0, const int *input1, int *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -561,7 +577,7 @@ int ElementMulInt(int *input0, int *input1, int *output, int element_size) { return NNACL_OK; } -int ElementMulReluInt(int *input0, int *input1, int *output, int element_size) { +int ElementMulReluInt(const int *input0, const int *input1, int *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON int32x4_t zeros = vdupq_n_s32(0); @@ -580,7 +596,7 @@ int ElementMulReluInt(int *input0, int *input1, int *output, int element_size) { return NNACL_OK; } -int ElementMulRelu6Int(int *input0, int *input1, int *output, int element_size) { +int ElementMulRelu6Int(const int *input0, const int *input1, int *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON int32x4_t zeros = vdupq_n_s32(0); @@ -604,7 +620,7 @@ int BroadcastMul(float *input0, float *input1, float *tile_input0, float *tile_i return ElementMul(tile_input0, tile_input1, output, element_size); } -int ElementAdd(float *input0, float *input1, float *output, int element_size) { +int ElementAdd(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -620,7 +636,7 @@ int ElementAdd(float *input0, float *input1, float *output, int element_size) { return NNACL_OK; } -int ElementAddRelu(float *input0, float *input1, float *output, int element_size) { +int 
ElementAddRelu(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -639,7 +655,7 @@ int ElementAddRelu(float *input0, float *input1, float *output, int element_size return NNACL_OK; } -int ElementAddRelu6(float *input0, float *input1, float *output, int element_size) { +int ElementAddRelu6(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -657,7 +673,7 @@ int ElementAddRelu6(float *input0, float *input1, float *output, int element_siz return NNACL_OK; } -int ElementAddInt(int *input0, int *input1, int *output, int element_size) { +int ElementAddInt(const int *input0, const int *input1, int *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -692,7 +708,7 @@ int BroadcastAddInt8(int8_t *input0, int8_t *input1, int8_t *tile_input0, int8_t return ElementAddInt8(tile_input0, tile_input1, output, element_size); } -int ElementSub(float *input0, float *input1, float *output, int element_size) { +int ElementSub(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -708,7 +724,7 @@ int ElementSub(float *input0, float *input1, float *output, int element_size) { return NNACL_OK; } -int ElementSubRelu(float *input0, float *input1, float *output, int element_size) { +int ElementSubRelu(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -727,7 +743,7 @@ int ElementSubRelu(float *input0, float *input1, float *output, int element_size return NNACL_OK; } -int ElementSubRelu6(float *input0, float *input1, float *output, int element_size) { +int ElementSubRelu6(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t zeros = vdupq_n_f32(0.0f); @@ -752,14 +768,14 @@ int BroadcastSub(float *input0, float *input1, float *tile_input0, float *tile_i return ElementSub(tile_input0, tile_input1, output, element_size); } -int ElementDiv(float *input0, float *input1, float *output, int element_size) { +int ElementDiv(const float *input0, const float *input1, float *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = input0[i] / input1[i]; } return NNACL_OK; } -int ElementDivRelu(float *input0, float *input1, float *output, int element_size) { +int ElementDivRelu(const float *input0, const float *input1, float *output, const int element_size) { for (int i = 0; i < element_size; i++) { float res = input0[i] / input1[i]; output[i] = res > 0 ? 
res : 0; @@ -767,7 +783,7 @@ int ElementDivRelu(float *input0, float *input1, float *output, int element_size return NNACL_OK; } -int ElementDivRelu6(float *input0, float *input1, float *output, int element_size) { +int ElementDivRelu6(const float *input0, const float *input1, float *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = MSMIN(MSMAX(input0[i] / input1[i], 0), 6); } @@ -780,14 +796,14 @@ int BroadcastDiv(float *input0, float *input1, float *tile_input0, float *tile_i return ElementDiv(tile_input0, tile_input1, output, element_size); } -int ElementFloorMod(float *input0, float *input1, float *output, int element_size) { +int ElementFloorMod(const float *input0, const float *input1, float *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = input0[i] - floorf(input0[i] / input1[i]) * input1[i]; } return NNACL_OK; } -int ElementFloorModInt(int *input0, int *input1, int *output, int element_size) { +int ElementFloorModInt(const int *input0, const int *input1, int *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = input0[i] - (input0[i] / input1[i]) * input1[i]; } @@ -800,14 +816,14 @@ int BroadcastFloorMod(float *input0, float *input1, float *tile_input0, float *t return ElementFloorMod(tile_input0, tile_input1, output, element_size); } -int ElementFloorDiv(float *input0, float *input1, float *output, int element_size) { +int ElementFloorDiv(const float *input0, const float *input1, float *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = floorf(input0[i] / input1[i]); } return NNACL_OK; } -int ElementFloorDivInt(int *input0, int *input1, int *output, int element_size) { +int ElementFloorDivInt(const int *input0, const int *input1, int *output, const int element_size) { for (int i = 0; i < element_size; i++) { output[i] = input0[i] / input1[i]; } @@ -820,7 +836,7 @@ int BroadcastFloorDiv(float *input0, float *input1, float *tile_input0, float *t return ElementFloorDiv(tile_input0, tile_input1, output, element_size); } -int ElementLogicalAnd(float *input0, float *input1, float *output, int element_size) { +int ElementLogicalAnd(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -840,7 +856,7 @@ int ElementLogicalAnd(float *input0, float *input1, float *output, int element_s return NNACL_OK; } -int ElementSquaredDifference(float *input0, float *input1, float *output, int element_size) { +int ElementSquaredDifference(const float *input0, const float *input1, float *output, const int element_size) { ElementSub(input0, input1, output, element_size); return ElementMul(output, output, output, element_size); } @@ -857,7 +873,7 @@ int BroadcastLogicalAnd(float *input0, float *input1, float *tile_input0, float return ElementLogicalAnd(tile_input0, tile_input1, output, element_size); } -int ElementLogicalOr(float *input0, float *input1, float *output, int element_size) { +int ElementLogicalOr(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -883,7 +899,7 @@ int BroadcastLogicalOr(float *input0, float *input1, float *tile_input0, float * return ElementLogicalOr(tile_input0, tile_input1, output, element_size); } -int ElementMaximum(float *input0, float *input1, float *output, int element_size) { +int ElementMaximum(const float *input0, const 
float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -905,7 +921,7 @@ int BroadcastMaximum(float *input0, float *input1, float *tile_input0, float *ti return ElementMaximum(tile_input0, tile_input1, output, element_size); } -int ElementMinimum(float *input0, float *input1, float *output, int element_size) { +int ElementMinimum(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON for (; index <= element_size - 4; index += C4NUM) { @@ -935,7 +951,7 @@ float FloatNotEqualCheck(float in0, float in1) { return (float)true; } -int ElementNotEqual(float *input0, float *input1, float *output, int element_size) { +int ElementNotEqual(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -967,7 +983,7 @@ float FloatEqualCheck(float in0, float in1) { return (float)false; } -int ElementEqual(float *input0, float *input1, float *output, int element_size) { +int ElementEqual(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -991,7 +1007,7 @@ int BroadcastEqual(float *input0, float *input1, float *tile_input0, float *tile return ElementEqual(tile_input0, tile_input1, output, element_size); } -int ElementLess(float *input0, float *input1, float *output, int element_size) { +int ElementLess(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -1015,7 +1031,7 @@ int BroadcastLess(float *input0, float *input1, float *tile_input0, float *tile_ return ElementLess(tile_input0, tile_input1, output, element_size); } -int ElementLessEqual(float *input0, float *input1, float *output, int element_size) { +int ElementLessEqual(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -1039,7 +1055,7 @@ int BroadcastLessEqual(float *input0, float *input1, float *tile_input0, float * return ElementLessEqual(tile_input0, tile_input1, output, element_size); } -int ElementGreater(float *input0, float *input1, float *output, int element_size) { +int ElementGreater(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); @@ -1063,7 +1079,7 @@ int BroadcastGreater(float *input0, float *input1, float *tile_input0, float *ti return ElementGreater(tile_input0, tile_input1, output, element_size); } -int ElementGreaterEqual(float *input0, float *input1, float *output, int element_size) { +int ElementGreaterEqual(const float *input0, const float *input1, float *output, const int element_size) { int index = 0; #ifdef ENABLE_NEON float32x4_t vtrue = vdupq_n_f32(1); diff --git a/mindspore/lite/nnacl/fp32/arithmetic.h b/mindspore/lite/nnacl/fp32/arithmetic.h index aac6a99120..202f9e85fd 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic.h +++ b/mindspore/lite/nnacl/fp32/arithmetic.h @@ -26,105 +26,121 @@ #ifdef __cplusplus extern "C" { #endif -int ElementOptAdd(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptAddInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param); -int ElementOptAddRelu(float 
*input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptAddRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptSub(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptSubRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptSubRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptMul(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptMulRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptMulRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptMulInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param); -int ElementOptMulReluInt(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param); -int ElementOptMulRelu6Int(int *input0, int *input1, int *output, int element_size, ArithmeticParameter *param); -int ElementOptDiv(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptDivRelu(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementOptDivRelu6(float *input0, float *input1, float *output, int element_size, ArithmeticParameter *param); -int ElementMul(float *input0, float *input1, float *output, int element_size); -int ElementMulRelu(float *input0, float *input1, float *output, int element_size); -int ElementMulRelu6(float *input0, float *input1, float *output, int element_size); -int ElementMulInt(int *input0, int *input1, int *output, int element_size); -int ElementMulReluInt(int *input0, int *input1, int *output, int element_size); -int ElementMulRelu6Int(int *input0, int *input1, int *output, int element_size); +int ElementOptAdd(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptAddInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptAddRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptAddRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptSub(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptSubRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptSubRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptMul(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptMulRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptMulRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptMulInt(const int *input0, const int *input1, int *output, const int element_size, + const 
ArithmeticParameter *param); +int ElementOptMulReluInt(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptMulRelu6Int(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptDiv(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptDivRelu(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementOptDivRelu6(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); +int ElementMul(const float *input0, const float *input1, float *output, const int element_size); +int ElementMulRelu(const float *input0, const float *input1, float *output, const int element_size); +int ElementMulRelu6(const float *input0, const float *input1, float *output, const int element_size); +int ElementMulInt(const int *input0, const int *input1, int *output, const int element_size); +int ElementMulReluInt(const int *input0, const int *input1, int *output, const int element_size); +int ElementMulRelu6Int(const int *input0, const int *input1, int *output, const int element_size); int BroadcastMul(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementAdd(float *input0, float *input1, float *output, int element_size); -int ElementAddRelu(float *input0, float *input1, float *output, int element_size); -int ElementAddRelu6(float *input0, float *input1, float *output, int element_size); -int ElementAddInt(int *input0, int *input1, int *output, int element_size); +int ElementAdd(const float *input0, const float *input1, float *output, const int element_size); +int ElementAddRelu(const float *input0, const float *input1, float *output, const int element_size); +int ElementAddRelu6(const float *input0, const float *input1, float *output, const int element_size); +int ElementAddInt(const int *input0, const int *input1, int *output, const int element_size); int BroadcastAdd(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); int BroadcastAddInt8(int8_t *input0, int8_t *input1, int8_t *tile_input0, int8_t *tile_input1, int8_t *output, int element_size, ArithmeticParameter *param); -int ElementSub(float *input0, float *input1, float *output, int element_size); -int ElementSubRelu(float *input0, float *input1, float *output, int element_size); -int ElementSubRelu6(float *input0, float *input1, float *output, int element_size); +int ElementSub(const float *input0, const float *input1, float *output, const int element_size); +int ElementSubRelu(const float *input0, const float *input1, float *output, const int element_size); +int ElementSubRelu6(const float *input0, const float *input1, float *output, const int element_size); int BroadcastSub(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementDiv(float *input0, float *input1, float *output, int element_size); -int ElementDivRelu(float *input0, float *input1, float *output, int element_size); -int ElementDivRelu6(float *input0, float *input1, float *output, int element_size); +int ElementDiv(const float *input0, const float *input1, float *output, const int element_size); 
+int ElementDivRelu(const float *input0, const float *input1, float *output, const int element_size); +int ElementDivRelu6(const float *input0, const float *input1, float *output, const int element_size); int BroadcastDiv(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementLogicalAnd(float *input0, float *input1, float *output, int element_size); +int ElementLogicalAnd(const float *input0, const float *input1, float *output, const int element_size); int BroadcastLogicalAnd(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementLogicalOr(float *input0, float *input1, float *output, int element_size); +int ElementLogicalOr(const float *input0, const float *input1, float *output, const int element_size); int BroadcastLogicalOr(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementMaximum(float *input0, float *input1, float *output, int element_size); +int ElementMaximum(const float *input0, const float *input1, float *output, const int element_size); int BroadcastMaximum(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementMinimum(float *input0, float *input1, float *output, int element_size); +int ElementMinimum(const float *input0, const float *input1, float *output, const int element_size); int BroadcastMinimum(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementFloorDiv(float *input0, float *input1, float *output, int element_size); -int ElementFloorDivInt(int *input0, int *input1, int *output, int element_size); +int ElementFloorDiv(const float *input0, const float *input1, float *output, const int element_size); +int ElementFloorDivInt(const int *input0, const int *input1, int *output, const int element_size); int BroadcastFloorDiv(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementFloorMod(float *input0, float *input1, float *output, int element_size); -int ElementFloorModInt(int *input0, int *input1, int *output, int element_size); +int ElementFloorMod(const float *input0, const float *input1, float *output, const int element_size); +int ElementFloorModInt(const int *input0, const int *input1, int *output, const int element_size); int BroadcastFloorMod(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementSquaredDifference(float *input0, float *input1, float *output, int element_size); +int ElementSquaredDifference(const float *input0, const float *input1, float *output, const int element_size); int BroadcastSquaredDifference(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementNotEqual(float *input0, float *input1, float *output, int element_size); +int ElementNotEqual(const float *input0, const float *input1, float *output, const int element_size); int BroadcastNotEqual(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementEqual(float *input0, float *input1, float *output, int 
element_size); +int ElementEqual(const float *input0, const float *input1, float *output, const int element_size); int BroadcastEqual(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementLess(float *input0, float *input1, float *output, int element_size); +int ElementLess(const float *input0, const float *input1, float *output, const int element_size); int BroadcastLess(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementLessEqual(float *input0, float *input1, float *output, int element_size); +int ElementLessEqual(const float *input0, const float *input1, float *output, const int element_size); int BroadcastLessEqual(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementGreater(float *input0, float *input1, float *output, int element_size); +int ElementGreater(const float *input0, const float *input1, float *output, const int element_size); int BroadcastGreater(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); -int ElementGreaterEqual(float *input0, float *input1, float *output, int element_size); +int ElementGreaterEqual(const float *input0, const float *input1, float *output, const int element_size); int BroadcastGreaterEqual(float *input0, float *input1, float *tile_input0, float *tile_input1, float *output, int element_size, ArithmeticParameter *param); diff --git a/mindspore/lite/nnacl/fp32/batchnorm.c b/mindspore/lite/nnacl/fp32/batchnorm.c index 1c01c9be02..0e2449b1b8 100644 --- a/mindspore/lite/nnacl/fp32/batchnorm.c +++ b/mindspore/lite/nnacl/fp32/batchnorm.c @@ -18,7 +18,6 @@ #include <math.h> #include "nnacl/batchnorm_parameter.h" #include "nnacl/op_base.h" -#include "nnacl/errorcode.h" void BatchNormFp32(const void *input, const void *mean, const void *variance, BatchNormParameter *param, int task_id, void *output) { diff --git a/mindspore/lite/nnacl/fp32/conv.c b/mindspore/lite/nnacl/fp32/conv.c index 3c89acc73d..4ad11f31a4 100644 --- a/mindspore/lite/nnacl/fp32/conv.c +++ b/mindspore/lite/nnacl/fp32/conv.c @@ -21,7 +21,7 @@ #include "nnacl/fp32/matmul.h" // fp32 conv common -void ConvFp32(float *input_data, float *packed_input, float *packed_weight, const float *bias_data, +void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param) { int kernel_h = conv_param->kernel_h_; int kernel_w = conv_param->kernel_w_; @@ -70,7 +70,7 @@ void ConvFp32(float *input_data, float *packed_input, float *packed_weight, cons } // fp32 conv winograd -void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_data, float *output_data, +void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data, TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func, OutputTransFunc out_func) { int thread_num = conv_param->thread_num_; diff --git a/mindspore/lite/nnacl/fp32/conv.h b/mindspore/lite/nnacl/fp32/conv.h index 274456e557..a5e52fdf94 100644 --- a/mindspore/lite/nnacl/fp32/conv.h +++ b/mindspore/lite/nnacl/fp32/conv.h @@ -34,11 +34,11 @@ extern "C" { #endif // fp32 convolution common (im2col+gemm) -void ConvFp32(float
*input_data, float *packed_input, float *packed_weight, const float *bias_data, +void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param); // fp32 convolution winograd -void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_data, float *output_data, +void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data, TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func, OutputTransFunc out_func); #ifdef __cplusplus diff --git a/mindspore/lite/nnacl/fp32/deconv_winograd.c b/mindspore/lite/nnacl/fp32/deconv_winograd.c index 91b0abfe26..359a135ed3 100644 --- a/mindspore/lite/nnacl/fp32/deconv_winograd.c +++ b/mindspore/lite/nnacl/fp32/deconv_winograd.c @@ -56,7 +56,9 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame /* winograd AT */ unit->winograd_.AT_ = malloc(unit->winograd_.i_ * unit->winograd_.o_ * sizeof(float)); if (unit->winograd_.AT_ == NULL) { - free(current_unit_weight); + if (current_unit_weight != NULL) { + free(current_unit_weight); + } return NNACL_NULL_PTR; } memcpy(unit->winograd_.AT_, matrix_at, unit->winograd_.i_ * unit->winograd_.o_ * sizeof(float)); @@ -64,8 +66,12 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame /* winograd BT */ unit->winograd_.BT_ = malloc(unit->winograd_.o_ * unit->winograd_.o_ * sizeof(float)); if (unit->winograd_.BT_ == NULL) { - free(current_unit_weight); - free(unit->winograd_.AT_); + if (current_unit_weight != NULL) { + free(current_unit_weight); + } + if (unit->winograd_.AT_ != NULL) { + free(unit->winograd_.AT_); + } return NNACL_NULL_PTR; } memcpy(unit->winograd_.BT_, matrix_bt, unit->winograd_.o_ * unit->winograd_.o_ * sizeof(float)); @@ -74,9 +80,15 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame size = conv_param->input_channel_ * conv_param->output_channel_ * unit->winograd_.kh_ * unit->winograd_.kw_; float *winograd_unit_weight = (float *)malloc(size * sizeof(float)); if (winograd_unit_weight == NULL) { - free(current_unit_weight); - free(unit->winograd_.AT_); - free(unit->winograd_.BT_); + if (current_unit_weight != NULL) { + free(current_unit_weight); + } + if (unit->winograd_.AT_ != NULL) { + free(unit->winograd_.AT_); + } + if (unit->winograd_.BT_ != NULL) { + free(unit->winograd_.BT_); + } return NNACL_NULL_PTR; } WinogradWeightTransform(current_unit_weight, winograd_unit_weight, matrix_g, matrix_gt, C4NUM, unit->winograd_.kh_, @@ -105,7 +117,9 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame } } - free(current_unit_weight); + if (current_unit_weight != NULL) { + free(current_unit_weight); + } return NNACL_OK; } @@ -317,7 +331,7 @@ void DeConvWgMerge(const float *src, float *dst, size_t src_stride, size_t dst_s return; } -void _deConvWinograd(float *tile_in, float *tile_out, float *weight_buf, float *tmp_buf, float *at_buf, +void _deConvWinograd(const float *tile_in, float *tile_out, float *weight_buf, float *tmp_buf, float *at_buf, float *a_mid_buf, float *trans_a_buf, bool *transfered, float *bt_buf, float *b_tmp_buf, int unit_size, int w_start, int h_start, ConvParameter *conv_param, DeConvParam *deconv_param) { int winograd_plane = unit_size * unit_size; @@ -357,8 +371,8 @@ void _deConvWinograd(float *tile_in, float *tile_out, float *weight_buf, float * 
return; } -void _deConvCommon(float *tile_in, float *tile_out, float *weight, float *tmp_buf, int h_start, int w_start, int h_size, - int w_size, ConvParameter *conv_param, DeConvParam *deconv_param) { +void _deConvCommon(float *tile_in, float *tile_out, const float *weight, float *tmp_buf, int h_start, int w_start, + int h_size, int w_size, ConvParameter *conv_param, DeConvParam *deconv_param) { int count = deconv_param->oc_div4_ * w_size * h_size; int in_stride = DECONV_WINOGRAD_DEFAULT_TILE * deconv_param->ic_up4_; int out_stride = DECONV_WINOGRAD_DEFAULT_TILE * deconv_param->oc_up4_; diff --git a/mindspore/lite/nnacl/fp32/detection_post_process.c b/mindspore/lite/nnacl/fp32/detection_post_process.c index da54d3f998..b72e8525ae 100644 --- a/mindspore/lite/nnacl/fp32/detection_post_process.c +++ b/mindspore/lite/nnacl/fp32/detection_post_process.c @@ -274,9 +274,9 @@ int NmsMultiClassesFast(const int num_boxes, const int num_classes_with_bg, cons return output_num; } -int DetectionPostProcess(const int num_boxes, const int num_classes_with_bg, float *input_boxes, float *input_scores, - float *input_anchors, float *output_boxes, float *output_classes, float *output_scores, - float *output_num, DetectionPostProcessParameter *param) { +int DetectionPostProcess(const int num_boxes, const int num_classes_with_bg, float *input_boxes, + const float *input_scores, float *input_anchors, float *output_boxes, float *output_classes, + float *output_scores, float *output_num, DetectionPostProcessParameter *param) { BboxCenter scaler; scaler.y = param->y_scale_; scaler.x = param->x_scale_; diff --git a/mindspore/lite/nnacl/fp32/detection_post_process.h b/mindspore/lite/nnacl/fp32/detection_post_process.h index 3bfa566303..096408d6eb 100644 --- a/mindspore/lite/nnacl/fp32/detection_post_process.h +++ b/mindspore/lite/nnacl/fp32/detection_post_process.h @@ -43,9 +43,9 @@ typedef struct { extern "C" { #endif -int DetectionPostProcess(const int num_boxes, const int num_classes_with_bg, float *input_boxes, float *input_scores, - float *input_anchors, float *output_boxes, float *output_classes, float *output_scores, - float *output_num, DetectionPostProcessParameter *param); +int DetectionPostProcess(const int num_boxes, const int num_classes_with_bg, float *input_boxes, + const float *input_scores, float *input_anchors, float *output_boxes, float *output_classes, + float *output_scores, float *output_num, DetectionPostProcessParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/elu.c b/mindspore/lite/nnacl/fp32/elu.c index 4d6d84c914..b31940a6c7 100644 --- a/mindspore/lite/nnacl/fp32/elu.c +++ b/mindspore/lite/nnacl/fp32/elu.c @@ -18,7 +18,7 @@ #include <math.h> #include "nnacl/errorcode.h" -void Calculate_Data(float *input_data, float *output_data, int num, EluParameter *parameter) { +void Calculate_Data(const float *input_data, float *output_data, int num, EluParameter *parameter) { output_data[num] = input_data[num] < 0 ?
parameter->alpha_ * expm1(input_data[num]) : input_data[num]; } diff --git a/mindspore/lite/nnacl/fp32/exp.c b/mindspore/lite/nnacl/fp32/exp.c index a135ed4563..e1790e47bc 100644 --- a/mindspore/lite/nnacl/fp32/exp.c +++ b/mindspore/lite/nnacl/fp32/exp.c @@ -18,7 +18,7 @@ #include <math.h> #include "nnacl/errorcode.h" -int Exp(float *input_data, float *output_data, ExpParameter *parameter, int task_id) { +int Exp(const float *input_data, float *output_data, ExpParameter *parameter, int task_id) { if (parameter->scale_ == 1) { for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) { output_data[i] = expf(input_data[i]); diff --git a/mindspore/lite/nnacl/fp32/exp.h b/mindspore/lite/nnacl/fp32/exp.h index e9d8bd3d65..2ada5325ac 100644 --- a/mindspore/lite/nnacl/fp32/exp.h +++ b/mindspore/lite/nnacl/fp32/exp.h @@ -33,7 +33,7 @@ typedef struct ExpParameter { #ifdef __cplusplus extern "C" { #endif -int Exp(float *input_data, float *output_data, ExpParameter *parameter, int task_id); +int Exp(const float *input_data, float *output_data, ExpParameter *parameter, int task_id); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/expandDims.c b/mindspore/lite/nnacl/fp32/expandDims.c index 660b8b8aa9..2bc9bc30be 100644 --- a/mindspore/lite/nnacl/fp32/expandDims.c +++ b/mindspore/lite/nnacl/fp32/expandDims.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int ExpandDims(void *input_ptr, void *output_ptr, size_t data_size) { +int ExpandDims(const void *input_ptr, void *output_ptr, size_t data_size) { memcpy(output_ptr, input_ptr, data_size); return NNACL_OK; } diff --git a/mindspore/lite/nnacl/fp32/expandDims.h b/mindspore/lite/nnacl/fp32/expandDims.h index e106e1189d..f7c7c1f713 100644 --- a/mindspore/lite/nnacl/fp32/expandDims.h +++ b/mindspore/lite/nnacl/fp32/expandDims.h @@ -27,7 +27,7 @@ typedef struct ExpandDimsParameter { #ifdef __cplusplus extern "C" { #endif -int ExpandDims(void *input_ptr, void *output_ptr, size_t data_size); +int ExpandDims(const void *input_ptr, void *output_ptr, size_t data_size); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/gather.c b/mindspore/lite/nnacl/fp32/gather.c index cbd7cf90d7..17eab54582 100644 --- a/mindspore/lite/nnacl/fp32/gather.c +++ b/mindspore/lite/nnacl/fp32/gather.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -inline int Stride(int *shape, int rank, int index) { +inline int Stride(const int *shape, int rank, int index) { int i, stride = 1; for (i = index + 1; i < rank; ++i) { stride *= shape[i]; @@ -26,7 +26,7 @@ inline int Stride(int *shape, int rank, int index) { return stride; } -int Gather(float *input, int outer_size, int inner_size, int limit, int *indices, int indices_element_size, +int Gather(float *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, float *output) { int i, m; for (m = 0; m < outer_size; ++m) { @@ -42,7 +42,7 @@ int Gather(float *input, int outer_size, int inner_size, int limit, int *indices return NNACL_OK; } -int GatherInt32(const int32_t *input, int outer_size, int inner_size, int limit, int *indices, +int GatherInt32(const int32_t *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, int32_t *output) { int i, m; for (m = 0; m < outer_size; ++m) { diff --git a/mindspore/lite/nnacl/fp32/gather.h b/mindspore/lite/nnacl/fp32/gather.h index c2ded7f99e..fcff6a6351 100644 --- a/mindspore/lite/nnacl/fp32/gather.h +++ b/mindspore/lite/nnacl/fp32/gather.h @@ -22,10 +22,10 @@
#ifdef __cplusplus extern "C" { #endif -int Gather(float *input, int outer_size, int inner_size, int limit, int *indices, int indices_element_size, +int Gather(float *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, float *output); -int GatherInt32(const int32_t *input, int outer_size, int inner_size, int limit, int *indices, int indices_element_size, - int32_t *output); +int GatherInt32(const int32_t *input, int outer_size, int inner_size, int limit, const int *indices, + int indices_element_size, int32_t *output); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/gatherNd.c b/mindspore/lite/nnacl/fp32/gatherNd.c index 7a12ea1dd2..56b332b308 100644 --- a/mindspore/lite/nnacl/fp32/gatherNd.c +++ b/mindspore/lite/nnacl/fp32/gatherNd.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int GatherNd(float *input, float *output, int *in_offset, int area, int count) { +int GatherNd(const float *input, float *output, int *in_offset, int area, int count) { int i = 0; for (i = 0; i < count; i++) { (void)memcpy(output + area * i, input + in_offset[i], area * sizeof(float)); diff --git a/mindspore/lite/nnacl/fp32/gatherNd.h b/mindspore/lite/nnacl/fp32/gatherNd.h index 467c94b974..701000254c 100644 --- a/mindspore/lite/nnacl/fp32/gatherNd.h +++ b/mindspore/lite/nnacl/fp32/gatherNd.h @@ -27,7 +27,7 @@ typedef struct GatherNdParameter { #ifdef __cplusplus extern "C" { #endif -int GatherNd(float *input, float *output, int *in_offset, int area, int count); +int GatherNd(const float *input, float *output, int *in_offset, int area, int count); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/lstm.c b/mindspore/lite/nnacl/fp32/lstm.c index 7c19ecf2e9..b694c66c98 100644 --- a/mindspore/lite/nnacl/fp32/lstm.c +++ b/mindspore/lite/nnacl/fp32/lstm.c @@ -79,13 +79,13 @@ void ElementMulAcc(const float *input0, const float *input1, float *output, int } } -void UpdataState(float *cell_state, float *forget_gate, float *input_gate, float *cell_gate, int batch, +void UpdataState(float *cell_state, float *forget_gate, const float *input_gate, float *cell_gate, int batch, int hidden_size) { ElementMul(forget_gate, cell_state, cell_state, batch * hidden_size); ElementMulAcc(input_gate, cell_gate, cell_state, batch * hidden_size); } -void UpdataOutput(float *cell_state, float *output_gate, float *hidden_state, int batch, int hidden_size) { +void UpdataOutput(const float *cell_state, float *output_gate, float *hidden_state, int batch, int hidden_size) { Tanh(cell_state, batch * hidden_size, hidden_state); ElementMul(hidden_state, output_gate, hidden_state, batch * hidden_size); } diff --git a/mindspore/lite/nnacl/fp32/matmul.c b/mindspore/lite/nnacl/fp32/matmul.c index 9ea98fdbf0..85a219ab95 100644 --- a/mindspore/lite/nnacl/fp32/matmul.c +++ b/mindspore/lite/nnacl/fp32/matmul.c @@ -16,7 +16,7 @@ #include "nnacl/fp32/matmul.h" -void RowMajor2ColMajor(float *src_ptr, float *dst_ptr, int row, int col) { +void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col) { for (int r = 0; r < row; ++r) { for (int c = 0; c < col; ++c) { dst_ptr[c * row + r] = src_ptr[r * col + c]; diff --git a/mindspore/lite/nnacl/fp32/matmul.h b/mindspore/lite/nnacl/fp32/matmul.h index 5b07f514f4..39b14d2d83 100644 --- a/mindspore/lite/nnacl/fp32/matmul.h +++ b/mindspore/lite/nnacl/fp32/matmul.h @@ -29,7 +29,7 @@ extern "C" { #endif void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, int
col, size_t stride, int out_type); void MatVecMul(const float *a, const float *b, float *c, const float *bias, ActType act_type, int depth, int col); -void RowMajor2ColMajor(float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col); void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col); void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col); void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col); diff --git a/mindspore/lite/nnacl/fp32/resize.c b/mindspore/lite/nnacl/fp32/resize.c index 2e2012d167..5cf7beeeb8 100644 --- a/mindspore/lite/nnacl/fp32/resize.c +++ b/mindspore/lite/nnacl/fp32/resize.c @@ -65,7 +65,7 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool } int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, int n_h_begin, int n_h_end) { if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL || y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) { @@ -155,7 +155,7 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input } int InterpRow(const float *src_line, float *linear_output, int new_width, float *x_left_weights, int *x_lefts, - int *x_rights, int in_c) { + const int *x_rights, int in_c) { int w; for (w = 0; w < new_width; w++) { int c = 0; @@ -208,7 +208,7 @@ int InterpCol(const float *bottom_line, const float *top_line, float *output, in } int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end) { if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL || y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) { diff --git a/mindspore/lite/nnacl/fp32/resize.h b/mindspore/lite/nnacl/fp32/resize.h index 039eccc840..3332d0c315 100644 --- a/mindspore/lite/nnacl/fp32/resize.h +++ b/mindspore/lite/nnacl/fp32/resize.h @@ -30,11 +30,11 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights); int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, int n_h_begin, int n_h_end); int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end); int 
ResizeNearestNeighbor(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, diff --git a/mindspore/lite/nnacl/fp32/roi_pooling.c b/mindspore/lite/nnacl/fp32/roi_pooling.c index c009589f1d..68f3fa4f76 100644 --- a/mindspore/lite/nnacl/fp32/roi_pooling.c +++ b/mindspore/lite/nnacl/fp32/roi_pooling.c @@ -20,7 +20,7 @@ #include "nnacl/errorcode.h" #include "nnacl/op_base.h" -int ROIPooling(float *in_ptr, float *out_ptr, float *roi, float *max_c, int tid, ROIPoolingParameter *param) { +int ROIPooling(float *in_ptr, float *out_ptr, const float *roi, float *max_c, int tid, ROIPoolingParameter *param) { int num_rois = param->output_n_; int units = UP_DIV(num_rois, param->thread_num_); int roi_st = tid * units; diff --git a/mindspore/lite/nnacl/fp32/roi_pooling.h b/mindspore/lite/nnacl/fp32/roi_pooling.h index cd36fcb110..4159463324 100644 --- a/mindspore/lite/nnacl/fp32/roi_pooling.h +++ b/mindspore/lite/nnacl/fp32/roi_pooling.h @@ -40,7 +40,7 @@ typedef struct ROIPoolingParameter { #ifdef __cplusplus extern "C" { #endif -int ROIPooling(float *in_ptr, float *out_ptr, float *roi, float *max_c, int tid, ROIPoolingParameter *param); +int ROIPooling(float *in_ptr, float *out_ptr, const float *roi, float *max_c, int tid, ROIPoolingParameter *param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/scale.c b/mindspore/lite/nnacl/fp32/scale.c index 99e17f0332..87d2fa79c6 100644 --- a/mindspore/lite/nnacl/fp32/scale.c +++ b/mindspore/lite/nnacl/fp32/scale.c @@ -18,7 +18,7 @@ #ifdef ENABLE_ARM #include <arm_neon.h> #endif -void ScaleInner(float *in_data, float *out_data, float *scale, float *offset, int outer_start, int outer_end, +void ScaleInner(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, int axis_size, int inner_size) { for (int out = outer_start; out < outer_end; out++) { int out_offset = out * axis_size * inner_size; @@ -43,7 +43,7 @@ void ScaleInner(float *in_data, float *out_data, float *scale, float *offset, in } } -void ScaleAxis(float *in_data, float *out_data, float *scale, float *offset, int outer_start, int outer_end, +void ScaleAxis(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, int axis_size) { for (int out = outer_start; out < outer_end; out++) { int out_offset = out * axis_size; @@ -78,7 +78,7 @@ void DoScale(float *in_data, float *out_data, float *scale, float *offset, int t } } -void ScaleInnerRelu(float *in_data, float *out_data, float *scale, float *offset, int outer_start, int outer_end, +void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, int axis_size, int inner_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; @@ -108,7 +108,7 @@ void ScaleInnerRelu(float *in_data, float *out_data, float *scale, float *offset } } -void ScaleAxisRelu(float *in_data, float *out_data, float *scale, float *offset, int outer_start, int outer_end, +void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, int axis_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; @@ -149,7 +149,7 @@ void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, i } } -void ScaleInnerRelu6(float *in_data, float *out_data, float *scale, float *offset, int outer_start, int outer_end, +void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, int
diff --git a/mindspore/lite/nnacl/fp32/slice.c b/mindspore/lite/nnacl/fp32/slice.c index a3c264aed6..4233bd88b3 100644 --- a/mindspore/lite/nnacl/fp32/slice.c +++ b/mindspore/lite/nnacl/fp32/slice.c @@ -17,7 +17,6 @@ #include "nnacl/fp32/slice.h" #include <string.h> #include "nnacl/op_base.h" -#include "nnacl/errorcode.h" void PadSliceParameterTo4D(SliceParameter *param) { int32_t begin[DIMENSION_4D]; diff --git a/mindspore/lite/nnacl/fp32/space_to_batch.c b/mindspore/lite/nnacl/fp32/space_to_batch.c index 589f29080f..fa1967abc5 100644 --- a/mindspore/lite/nnacl/fp32/space_to_batch.c +++ b/mindspore/lite/nnacl/fp32/space_to_batch.c @@ -16,7 +16,7 @@ #include "nnacl/fp32/space_to_batch.h" #include "nnacl/arithmetic_common.h" -void DoSpaceToBatchNHWC(const float *input, float *output, int *block_sizes, int *in_shape, int *out_shape) { +void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape) { int out_dim0 = out_shape[0]; int out_dim1 = out_shape[1]; int out_dim2 = out_shape[2]; @@ -45,7 +45,7 @@ void DoSpaceToBatchNHWC(const float *input, float *output, int *block_sizes, int } } -void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, int *padding, int *out_shape) { +void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape) { int in_h = in_shape[1]; int in_w = in_shape[2]; int in_c = in_shape[3]; @@ -63,8 +63,8 @@ void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, for (int i = 0; i < in_shape[0]; ++i) { size_t in_offset0 = i * in_strides[0]; for (int pad_h_top = 0; pad_h_top < padding[0]; ++pad_h_top) { - memset(output + out_offset, 0, ped_h_size); - out_offset += ped_h_num; + memset(output + out_offset, 0, ped_h_size); + out_offset += ped_h_num; } for (int j = 0; j < in_h; ++j) { size_t in_offset1 = in_offset0 + j * in_strides[1]; diff --git a/mindspore/lite/nnacl/fp32/space_to_batch.h b/mindspore/lite/nnacl/fp32/space_to_batch.h index 31c91e5c1f..65f16e3df0 100644 --- a/mindspore/lite/nnacl/fp32/space_to_batch.h +++ b/mindspore/lite/nnacl/fp32/space_to_batch.h @@ -30,8 +30,8 @@ typedef struct SpaceToBatchParameter { #ifdef __cplusplus extern "C" { #endif -void DoSpaceToBatchNHWC(const float *input, float *output, int *block_sizes, int *in_shape, int *out_shape); -void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, int *padding, int *out_shape); +void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape); +void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense.c b/mindspore/lite/nnacl/fp32/sparse_to_dense.c index c22f1c4677..3e044f9892 100644 --- a/mindspore/lite/nnacl/fp32/sparse_to_dense.c +++ b/mindspore/lite/nnacl/fp32/sparse_to_dense.c @@ -15,9 +15,8 @@ */ #include "nnacl/fp32/sparse_to_dense.h" -void
SparseToDense(int **sparse_indices, int *output_shape, - float *sparse_values, float default_value, float *output, - bool isScalar, int index_start, int index_end, int out_width) { +void SparseToDense(int **sparse_indices, int *output_shape, const float *sparse_values, float default_value, + float *output, bool isScalar, int index_start, int index_end, int out_width) { for (int i = index_start; i < index_end; i++) { for (int j = 0; j < out_width; j++) { output[i * out_width + j] = default_value; @@ -31,14 +30,12 @@ void SparseToDense(int **sparse_indices, int *output_shape, int index; if (isScalar == true) { for (int i = index_start; i < index_end; i++) { - index = d1 * sparse_indices[i][0] + d2 * sparse_indices[i][1] + - d3 * sparse_indices[i][2] + sparse_indices[i][3]; + index = d1 * sparse_indices[i][0] + d2 * sparse_indices[i][1] + d3 * sparse_indices[i][2] + sparse_indices[i][3]; output[index] = sparse_values[0]; } } else { for (int i = index_start; i < index_end; i++) { - index = d1 * sparse_indices[i][0] + d2 * sparse_indices[i][1] + - d3 * sparse_indices[i][2] + sparse_indices[i][3]; + index = d1 * sparse_indices[i][0] + d2 * sparse_indices[i][1] + d3 * sparse_indices[i][2] + sparse_indices[i][3]; output[index] = sparse_values[i]; } } diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense.h b/mindspore/lite/nnacl/fp32/sparse_to_dense.h index 15c2345a76..c7e9df6e68 100644 --- a/mindspore/lite/nnacl/fp32/sparse_to_dense.h +++ b/mindspore/lite/nnacl/fp32/sparse_to_dense.h @@ -21,9 +21,8 @@ #ifdef __cplusplus extern "C" { #endif -void SparseToDense(int **sparse_indices_vect, int *output_shape, - float *sparse_values, float default_value, float *output, - bool isScalar, int index_start, int index_end, int out_width); +void SparseToDense(int **sparse_indices_vect, int *output_shape, const float *sparse_values, float default_value, + float *output, bool isScalar, int index_start, int index_end, int out_width); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/tile.c b/mindspore/lite/nnacl/fp32/tile.c index 70a4c0c264..7903f68a7f 100644 --- a/mindspore/lite/nnacl/fp32/tile.c +++ b/mindspore/lite/nnacl/fp32/tile.c @@ -17,7 +17,7 @@ #include "nnacl/fp32/tile.h" #include <string.h> -void DoCopyData(float *input_data, float *output_data, size_t size, size_t multiple) { +void DoCopyData(const float *input_data, float *output_data, size_t size, size_t multiple) { float *out_data = output_data; for (size_t i = 0; i < multiple; ++i) { (void)memcpy(out_data, input_data, size * sizeof(float)); diff --git a/mindspore/lite/nnacl/fp32/unique.c b/mindspore/lite/nnacl/fp32/unique.c index 26d186fd79..ea9214dca9 100644 --- a/mindspore/lite/nnacl/fp32/unique.c +++ b/mindspore/lite/nnacl/fp32/unique.c @@ -16,7 +16,7 @@ #include "nnacl/fp32/unique.h" -int Find(float *array, int len, float target) { +int Find(const float *array, int len, float target) { for (int i = 0; i < len; ++i) { if (array[i] == target) { return i; @@ -25,7 +25,7 @@ int Find(float *array, int len, float target) { return -1; } -void Unique(float *input, int input_len, float *output0, int *output0_len, int *output1) { +void Unique(const float *input, int input_len, float *output0, int *output0_len, int *output1) { *output0_len = 0; for (int i = 0; i < input_len; i++) { int idx = Find(output0, *output0_len, input[i]); diff --git a/mindspore/lite/nnacl/fp32/unique.h b/mindspore/lite/nnacl/fp32/unique.h index 2e48d2b33b..a5592623bc 100644 --- a/mindspore/lite/nnacl/fp32/unique.h +++ b/mindspore/lite/nnacl/fp32/unique.h @@ -26,7 +26,7 @@ typedef struct UniqueParameter { #ifdef __cplusplus extern "C" { #endif -void Unique(float *input, int input_len, float *output0, int *output0_len, int *output1); +void Unique(const float *input, int input_len, float *output0, int *output0_len, int *output1); #ifdef __cplusplus } #endif
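Unique and Find above implement order-preserving deduplication: Find does a linear first-occurrence lookup into the values already emitted, so Unique is quadratic but needs no extra state; output0 receives the distinct values and output1 the per-element slot indices. A self-contained sketch of the same scheme (names are illustrative, not the nnacl API):

    #include <cstdio>

    static int FindFirst(const float *arr, int len, float target) {
      for (int i = 0; i < len; ++i) {
        if (arr[i] == target) return i;  // first occurrence wins
      }
      return -1;
    }

    static void UniqueSketch(const float *in, int in_len, float *out, int *out_len, int *idx) {
      *out_len = 0;
      for (int i = 0; i < in_len; ++i) {
        int pos = FindFirst(out, *out_len, in[i]);
        if (pos < 0) {         // unseen value: append it
          pos = (*out_len)++;
          out[pos] = in[i];
        }
        idx[i] = pos;          // every input element maps to its slot
      }
    }

    int main() {
      const float in[] = {1.0f, 2.0f, 1.0f, 3.0f};
      float out[4];
      int idx[4];
      int n = 0;
      UniqueSketch(in, 4, out, &n, idx);
      std::printf("%d unique, idx = [%d %d %d %d]\n", n, idx[0], idx[1], idx[2], idx[3]);  // 3 unique, [0 1 0 2]
    }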
diff --git a/mindspore/lite/nnacl/int8/conv_depthwise_int8.c b/mindspore/lite/nnacl/int8/conv_depthwise_int8.c index 41d838c0eb..187bf90674 100644 --- a/mindspore/lite/nnacl/int8/conv_depthwise_int8.c +++ b/mindspore/lite/nnacl/int8/conv_depthwise_int8.c @@ -491,8 +491,8 @@ void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16 /*conv depthwise sliding window perchannel int8 begin*/ void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height, int width, int in_kh_step, int in_kw_step, int kernel_w, int8_t *input_zp, - int32_t *out_zp, int *out_multiplier, int *left_shift, int *right_shift, int32_t *acc_min, - int32_t *acc_max) { + int32_t *out_zp, int *out_multiplier, int *left_shift, const int *right_shift, + int32_t *acc_min, int32_t *acc_max) { int tmp_buffer[C8NUM]; for (int i = 0; i < C8NUM; i++) { tmp_buffer[i] = 0; diff --git a/mindspore/lite/nnacl/int8/conv_int8.c b/mindspore/lite/nnacl/int8/conv_int8.c index 3794cbdc16..355370b536 100644 --- a/mindspore/lite/nnacl/int8/conv_int8.c +++ b/mindspore/lite/nnacl/int8/conv_int8.c @@ -94,7 +94,7 @@ void ConvInt8(int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, in unit_size = UP_ROUND(kernel_plane * in_channel, C16NUM); } #endif - bool per_channel; + bool per_channel = false; if (conv_param->conv_quant_arg_.per_channel_ & FILTER_PER_CHANNEL) { input_sum_offset = tile_n * up_round_oc; per_channel = true; diff --git a/mindspore/lite/nnacl/int8/depth_to_space_int8.c b/mindspore/lite/nnacl/int8/depth_to_space_int8.c index 5b19d02ec1..4ccafe5465 100644 --- a/mindspore/lite/nnacl/int8/depth_to_space_int8.c +++ b/mindspore/lite/nnacl/int8/depth_to_space_int8.c @@ -16,7 +16,7 @@ #include "nnacl/int8/depth_to_space_int8.h" #include <string.h> -void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, DepthToSpaceParameter *param, +void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, DepthToSpaceParameter *param, QuantArg *in_quant_arg, QuantArg *out_quant_arg) { int32_t block_size = param->block_size_; int32_t in_shape_dim2 = in_shape[2]; diff --git a/mindspore/lite/nnacl/int8/depth_to_space_int8.h b/mindspore/lite/nnacl/int8/depth_to_space_int8.h index 1ef57e03f5..f2dc6bcec7 100644 --- a/mindspore/lite/nnacl/int8/depth_to_space_int8.h +++ b/mindspore/lite/nnacl/int8/depth_to_space_int8.h @@ -22,7 +22,7 @@ #ifdef __cplusplus extern "C" { #endif -void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, DepthToSpaceParameter *param, +void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, DepthToSpaceParameter *param, QuantArg *in_quant_arg, QuantArg *out_quant_arg); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/int8/gatherNd_int8.c b/mindspore/lite/nnacl/int8/gatherNd_int8.c index 02141cf856..ad67be757c 100644 --- a/mindspore/lite/nnacl/int8/gatherNd_int8.c +++ b/mindspore/lite/nnacl/int8/gatherNd_int8.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int GatherNdInt8(int8_t *input, int8_t *output, int *in_offset, int area, int count, GatherQuantArg param) { +int GatherNdInt8(int8_t *input, int8_t *output, const int *in_offset, int area, int count,
GatherQuantArg param) { double alpha = param.alpha_; int z1 = param.zp_in_; int z2 = param.zp_out_; diff --git a/mindspore/lite/nnacl/int8/gatherNd_int8.h b/mindspore/lite/nnacl/int8/gatherNd_int8.h index 0ad07795fe..a507ca56fe 100644 --- a/mindspore/lite/nnacl/int8/gatherNd_int8.h +++ b/mindspore/lite/nnacl/int8/gatherNd_int8.h @@ -23,7 +23,7 @@ #ifdef __cplusplus extern "C" { #endif -int GatherNdInt8(int8_t *in_data, int8_t *out_data, int *in_offset, int area, int count, GatherQuantArg param); +int GatherNdInt8(int8_t *in_data, int8_t *out_data, const int *in_offset, int area, int count, GatherQuantArg param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/int8/gather_int8.c b/mindspore/lite/nnacl/int8/gather_int8.c index 042e24b5f8..34b71b583b 100644 --- a/mindspore/lite/nnacl/int8/gather_int8.c +++ b/mindspore/lite/nnacl/int8/gather_int8.c @@ -19,7 +19,7 @@ #include "nnacl/quantization/quantize.h" #include "nnacl/errorcode.h" -int GatherInt8(int8_t *in_data, int8_t *out_data, int outer_size, int inner_size, int limit, int *indices, +int GatherInt8(int8_t *in_data, int8_t *out_data, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, GatherQuantArg para) { double alpha = para.alpha_; int z1 = para.zp_in_; diff --git a/mindspore/lite/nnacl/int8/gather_int8.h b/mindspore/lite/nnacl/int8/gather_int8.h index 4a06e08d6c..5563f9316f 100644 --- a/mindspore/lite/nnacl/int8/gather_int8.h +++ b/mindspore/lite/nnacl/int8/gather_int8.h @@ -23,7 +23,7 @@ #ifdef __cplusplus extern "C" { #endif -int GatherInt8(int8_t *in_data, int8_t *out_data, int outer_size, int inner_size, int limit, int *indices, +int GatherInt8(int8_t *in_data, int8_t *out_data, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, GatherQuantArg para); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/int8/matmul_int8.c b/mindspore/lite/nnacl/int8/matmul_int8.c index e7f60a39d9..c0cc5c2cfa 100644 --- a/mindspore/lite/nnacl/int8/matmul_int8.c +++ b/mindspore/lite/nnacl/int8/matmul_int8.c @@ -301,7 +301,7 @@ void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, Dat } // dst: bias + depth*input_zp*weight_zp - input_zp*weight_col_sums -void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weight_zp, int *bias, int *dst, +void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weight_zp, const int *bias, int *dst, DataOrder order) { for (int c = 0; c < col; ++c) { int sum = 0; diff --git a/mindspore/lite/nnacl/int8/matmul_int8.h b/mindspore/lite/nnacl/int8/matmul_int8.h index 7aa1285dbe..4df4e6be5b 100644 --- a/mindspore/lite/nnacl/int8/matmul_int8.h +++ b/mindspore/lite/nnacl/int8/matmul_int8.h @@ -35,7 +35,7 @@ void MatMulInt8_16x4_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row void RowMajor2Row16x4MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); void RowMajor2Col16x4MajorInt8(int8_t *src, int row, int col, int8_t *dst); void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, DataOrder order); -void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weight_zp, int *bias, int *dst, +void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weight_zp, const int *bias, int *dst, DataOrder order); /* 8x4 4x8 -> 8x8 */
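The int8 gather kernels above carry a GatherQuantArg (alpha_, zp_in_, zp_out_) because copying between differently quantized tensors is an affine requantization, not a plain byte copy. A sketch of the per-element form, with alpha standing for scale_in/scale_out; the exact rounding and clamping used by nnacl may differ:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // real value = scale_in * (q - zp_in); re-encoding with scale_out and zp_out
    // folds into one multiplier alpha = scale_in / scale_out.
    inline int8_t Requantize(int8_t q, double alpha, int zp_in, int zp_out) {
      int v = static_cast<int>(std::round(alpha * (q - zp_in))) + zp_out;
      v = std::min(127, std::max(-128, v));  // clamp to the int8 range
      return static_cast<int8_t>(v);
    }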
diff --git a/mindspore/lite/nnacl/int8/slice_int8.c b/mindspore/lite/nnacl/int8/slice_int8.c index c893dbd8e2..6fb522441c 100644 --- a/mindspore/lite/nnacl/int8/slice_int8.c +++ b/mindspore/lite/nnacl/int8/slice_int8.c @@ -17,7 +17,6 @@ #include "nnacl/int8/slice_int8.h" #include <string.h> #include "nnacl/quantization/fixed_point.h" -#include "nnacl/errorcode.h" int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param) { double input_scale = param->quant_arg_.in_args_.scale_; diff --git a/mindspore/lite/nnacl/int8/softmax_int8.c b/mindspore/lite/nnacl/int8/softmax_int8.c index 7979cf09e6..4cf455aef7 100644 --- a/mindspore/lite/nnacl/int8/softmax_int8.c +++ b/mindspore/lite/nnacl/int8/softmax_int8.c @@ -18,7 +18,6 @@ #include <math.h> #include "nnacl/quantization/fixed_point.h" #include "nnacl/quantization/quantize.h" -#include "nnacl/errorcode.h" int SoftmaxInt8(const int8_t *input_ptr, int8_t *output_ptr, int count, int *exp_data, int *sum_data, SoftmaxQuantArg quant_param, SoftmaxParameter *parameter) { diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.c b/mindspore/lite/nnacl/int8/space_to_batch_int8.c index 84d15924e1..970437a281 100644 --- a/mindspore/lite/nnacl/int8/space_to_batch_int8.c +++ b/mindspore/lite/nnacl/int8/space_to_batch_int8.c @@ -16,7 +16,8 @@ #include "nnacl/int8/space_to_batch_int8.h" #include "nnacl/arithmetic_common.h" -void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, int *block_sizes, int *in_shape, int *out_shape) { +void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, + int *out_shape) { int out_dim0 = out_shape[0]; int out_dim1 = out_shape[1]; int out_dim2 = out_shape[2]; @@ -45,8 +46,8 @@ void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, int *block_size } } -void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, int *padding, int *out_shape, - int32_t zp) { +void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding, + int *out_shape, int32_t zp) { int in_h = in_shape[1]; int in_w = in_shape[2]; int in_c = in_shape[3]; diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.h b/mindspore/lite/nnacl/int8/space_to_batch_int8.h index bae1828d3e..cd19b442ff 100644 --- a/mindspore/lite/nnacl/int8/space_to_batch_int8.h +++ b/mindspore/lite/nnacl/int8/space_to_batch_int8.h @@ -21,9 +21,9 @@ #ifdef __cplusplus extern "C" { #endif -void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, int *block_sizes, int *in_shape, int *out_shape); -void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, int *padding, int *out_shape, - int32_t zp); +void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, int *out_shape); +void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding, + int *out_shape, int32_t zp); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/int8/unsqueeze_int8.c b/mindspore/lite/nnacl/int8/unsqueeze_int8.c index 6706611393..a7c2b04984 100644 --- a/mindspore/lite/nnacl/int8/unsqueeze_int8.c +++ b/mindspore/lite/nnacl/int8/unsqueeze_int8.c @@ -17,7 +17,6 @@ #include "nnacl/unsqueeze_parameter.h" #include "nnacl/int8/unsqueeze_int8.h" #include <string.h> -#include "nnacl/errorcode.h" int Int8Unsqueeze(int8_t *input_ptr, int8_t *output_ptr, UnSqueezeParameter *para_, size_t data_size, int task_id) { float output_scale = para_->quant_arg.out_quant_args_.scale_; diff --git a/mindspore/lite/nnacl/minimal_filtering_generator.c b/mindspore/lite/nnacl/minimal_filtering_generator.c index 7e4b0be340..a27b035993 100644 ---
a/mindspore/lite/nnacl/minimal_filtering_generator.c +++ b/mindspore/lite/nnacl/minimal_filtering_generator.c @@ -19,7 +19,7 @@ #include "nnacl/winograd_utils.h" #include "nnacl/errorcode.h" -void Polynomial(float *interval, float *m, int degree) { +void Polynomial(const float *interval, float *m, int degree) { for (int i = 0; i < degree; ++i) { float mul = 1; for (int j = 0; j < degree; ++j) { @@ -30,7 +30,7 @@ void Polynomial(float *interval, float *m, int degree) { } } -void DiagonalPlusMatrix(float *matrix, float *diagonal_matrix, int degree) { +void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree) { int data_num = (degree + 1) * (degree + 1); memset(diagonal_matrix, 0, data_num * sizeof(float)); for (int i = 0; i < degree; ++i) { @@ -41,7 +41,7 @@ void DiagonalPlusMatrix(float *matrix, float *diagonal_matrix, int degree) { diagonal_matrix[data_num - 1] = 1; } -void ResidueMatrix(float *interval, float *b, int row, int col) { +void ResidueMatrix(const float *interval, float *b, int row, int col) { // row : input unit, col : output_unit // result : matrix b int len = row * col; @@ -87,7 +87,7 @@ int LT(float *poly_array, float *matrix_lt, int n) { return NNACL_OK; } -void T(float *poly_array, float *matrix_t, int n) { +void T(const float *poly_array, float *matrix_t, int n) { memset(matrix_t, 0, n * (n + 1) * sizeof(float)); for (int i = 0; i < n; ++i) { for (int j = 0; j < n + 1; ++j) { @@ -148,7 +148,7 @@ void GenerateIntervalArray(float *array, float interval, int degree) { } } -void MatrixTranspose(float *matrix, float *trans_matrix, int row, int col) { +void MatrixTranspose(const float *matrix, float *trans_matrix, int row, int col) { for (int i = 0; i < col; ++i) { for (int j = 0; j < row; ++j) { trans_matrix[i * row + j] = matrix[j * col + i]; @@ -255,7 +255,7 @@ void MatrixMultiplyVec(const float32x4_t *matrix_a, const float32x4_t *matrix_b, } #endif -int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, float *matrix_gt, +int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, const float *matrix_gt, int oc_block, int input_unit, int kernel_unit, int channel, int batch, bool pack) { // original weight format : ohwi int oc_block_num = UP_DIV(batch, oc_block); diff --git a/mindspore/lite/nnacl/minimal_filtering_generator.h b/mindspore/lite/nnacl/minimal_filtering_generator.h index 53b146940a..376794c7be 100644 --- a/mindspore/lite/nnacl/minimal_filtering_generator.h +++ b/mindspore/lite/nnacl/minimal_filtering_generator.h @@ -26,21 +26,21 @@ #ifdef __cplusplus extern "C" { #endif -void Polynomial(float *interval, float *m, int degree); +void Polynomial(const float *interval, float *m, int degree); -void DiagonalPlusMatrix(float *matrix, float *diagonal_matrix, int degree); +void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree); -void ResidueMatrix(float *interval, float *b, int row, int col); +void ResidueMatrix(const float *interval, float *b, int row, int col); int LT(float *poly_array, float *matrix_lt, int n); -void T(float *poly_array, float *matrix_t, int n); +void T(const float *poly_array, float *matrix_t, int n); int B(float *poly_array, float *matrix_b, int in_unit); void GenerateIntervalArray(float *array, float interval, int degree); -void MatrixTranspose(float *matrix, float *trans_matrix, int row, int col); +void MatrixTranspose(const float *matrix, float *trans_matrix, int row, int col); void MatrixMultiply(const float *matrix_a, const 
float *matrix_b, float *matrix_c, int m, int k, int n); @@ -49,7 +49,7 @@ int CookToomFilter(float *matrix_a, float *matrix_at, float *matrix_b, float *ma void MatrixMultiplyWinograd(const float *matix_a, const float *matrix_b, float *matrix_c, int m, int k, int n, int in_channel, int c4_channel); -int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, float *matrix_gt, +int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, const float *matrix_gt, int oc_block, int input_unit_, int kernel_unit_, int channel, int batch, bool pack); #ifdef ENABLE_ARM diff --git a/mindspore/lite/nnacl/power.c b/mindspore/lite/nnacl/power.c index 7b86129438..afce6af6c7 100644 --- a/mindspore/lite/nnacl/power.c +++ b/mindspore/lite/nnacl/power.c @@ -36,6 +36,9 @@ float StdPowerImpl(float x, float exponent) { return pow(x, exponent); } void Power(const float *input, const float *exponent, float *output, int len, float scale, float shift, bool broadcast) { + if (input == NULL || exponent == NULL) { + return; + } if (broadcast) { if (CheckInteger(*exponent)) { for (int i = 0; i < len; ++i) { diff --git a/mindspore/lite/nnacl/quantization/quantize.c b/mindspore/lite/nnacl/quantization/quantize.c index 1e3485ec71..ce3ed1ca4d 100644 --- a/mindspore/lite/nnacl/quantization/quantize.c +++ b/mindspore/lite/nnacl/quantization/quantize.c @@ -71,7 +71,7 @@ void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, } // quantize from float to int8 -void Quantize(float *input_data, int length, float scale, int zero_point, int8_t *output_data) { +void Quantize(const float *input_data, int length, float scale, int zero_point, int8_t *output_data) { for (int i = 0; i < length; ++i) { int q = (int)round(input_data[i] / scale + zero_point); q = q > SCHAR_MAX ? 
SCHAR_MAX : q; diff --git a/mindspore/lite/nnacl/quantization/quantize.h b/mindspore/lite/nnacl/quantization/quantize.h index 354d19cc9b..f0e8f9a873 100644 --- a/mindspore/lite/nnacl/quantization/quantize.h +++ b/mindspore/lite/nnacl/quantization/quantize.h @@ -276,7 +276,7 @@ int32_t QuantizeToInt8(float real_value, float scale, int32_t zp); void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini, int *maxi); // quantize from float to int8 -void Quantize(float *input_data, int length, float scale, int zero_point, int8_t *output_data); +void Quantize(const float *input_data, int length, float scale, int zero_point, int8_t *output_data); // dequantize from int8 to float void Dequantize(int8_t *input_data, int length, float scale, int zero_point, float *output_data); diff --git a/mindspore/lite/nnacl/reshape.c b/mindspore/lite/nnacl/reshape.c index 0b4f345ace..21ebbe95fd 100644 --- a/mindspore/lite/nnacl/reshape.c +++ b/mindspore/lite/nnacl/reshape.c @@ -17,4 +17,4 @@ #include "nnacl/reshape.h" #include <string.h> -void Reshape(void *input_ptr, void *output_ptr, size_t data_size) { memcpy(output_ptr, input_ptr, data_size); } +void Reshape(const void *input_ptr, void *output_ptr, size_t data_size) { memcpy(output_ptr, input_ptr, data_size); } diff --git a/mindspore/lite/nnacl/reshape.h b/mindspore/lite/nnacl/reshape.h index 426bccee95..a14901bfcf 100644 --- a/mindspore/lite/nnacl/reshape.h +++ b/mindspore/lite/nnacl/reshape.h @@ -21,7 +21,7 @@ #ifdef __cplusplus extern "C" { #endif -void Reshape(void *input_ptr, void *output_ptr, size_t data_size); +void Reshape(const void *input_ptr, void *output_ptr, size_t data_size); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/reverse_sequence.c b/mindspore/lite/nnacl/reverse_sequence.c index 79a0b95a51..78e4cb8757 100644 --- a/mindspore/lite/nnacl/reverse_sequence.c +++ b/mindspore/lite/nnacl/reverse_sequence.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/arithmetic_common.h" -void ReverseSequence(float *input0, void *input1, float *output, ReverseSequenceParameter *para) { +void ReverseSequence(float *input0, const void *input1, float *output, ReverseSequenceParameter *para) { (void)memcpy(output, input0, para->total_data_size_); ComputeStrides(para->input_shape0_, para->input_stride_, para->ndim_); ComputeStrides(para->output_shape_, para->output_stride_, para->ndim_); diff --git a/mindspore/lite/nnacl/reverse_sequence.h b/mindspore/lite/nnacl/reverse_sequence.h index f85f13a8de..8080abad66 100644 --- a/mindspore/lite/nnacl/reverse_sequence.h +++ b/mindspore/lite/nnacl/reverse_sequence.h @@ -40,7 +40,7 @@ typedef struct ReverseSequenceParameter { #ifdef __cplusplus extern "C" { #endif -void ReverseSequence(float *input0, void *input1, float *output, ReverseSequenceParameter *para); +void ReverseSequence(float *input0, const void *input1, float *output, ReverseSequenceParameter *para); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/scatter_nd.c b/mindspore/lite/nnacl/scatter_nd.c index 798ff76c45..56fabec065 100644 --- a/mindspore/lite/nnacl/scatter_nd.c +++ b/mindspore/lite/nnacl/scatter_nd.c @@ -19,7 +19,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int DoScatterND(float *output_ptr, float *update, int *output_unit_offsets, int unit_size, int num_units) { +int DoScatterND(float *output_ptr, const float *update, int *output_unit_offsets, int unit_size, int num_units) { if (output_ptr == NULL || update == NULL || output_unit_offsets == NULL || unit_size <= 0 || num_units < 0) { return NNACL_ERR; }
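DoScatterND above keeps the NULL/size guard and then, per unit, copies one unit_size block of floats to a precomputed output offset. A compact sketch of that validate-then-copy shape (illustrative names, not the exported nnacl symbol):

    #include <cstring>

    int ScatterUnits(float *out, const float *update, const int *offsets, int unit_size, int num_units) {
      if (out == nullptr || update == nullptr || offsets == nullptr || unit_size <= 0 || num_units < 0) {
        return -1;  // mirrors the NNACL_ERR early return
      }
      for (int i = 0; i < num_units; ++i) {
        std::memcpy(out + offsets[i], update + i * unit_size, unit_size * sizeof(float));
      }
      return 0;
    }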
diff --git a/mindspore/lite/nnacl/scatter_nd.h b/mindspore/lite/nnacl/scatter_nd.h index d8f04fbaf2..4dbd7bc465 100644 --- a/mindspore/lite/nnacl/scatter_nd.h +++ b/mindspore/lite/nnacl/scatter_nd.h @@ -26,7 +26,7 @@ typedef struct ScatterNDParameter { #ifdef __cplusplus extern "C" { #endif -int DoScatterND(float *output_ptr, float *update, int *output_unit_offsets, int unit_size, int num_units); +int DoScatterND(float *output_ptr, const float *update, int *output_unit_offsets, int unit_size, int num_units); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/squeeze.c b/mindspore/lite/nnacl/squeeze.c index b02e6408f2..3ec5199ce0 100644 --- a/mindspore/lite/nnacl/squeeze.c +++ b/mindspore/lite/nnacl/squeeze.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int DoSqueeze(float *in_data, float *out_data, size_t data_size) { +int DoSqueeze(const float *in_data, float *out_data, size_t data_size) { if (in_data == NULL || out_data == NULL) { return -1; } diff --git a/mindspore/lite/nnacl/squeeze.h b/mindspore/lite/nnacl/squeeze.h index aecd5a05a6..7a44a0d93a 100644 --- a/mindspore/lite/nnacl/squeeze.h +++ b/mindspore/lite/nnacl/squeeze.h @@ -27,7 +27,7 @@ typedef struct SqueezeParameter { #ifdef __cplusplus extern "C" { #endif -int DoSqueeze(float *input_ptr, float *output_ptr, size_t data_size); +int DoSqueeze(const float *input_ptr, float *output_ptr, size_t data_size); int DoSqueezeInt32(int32_t *in_data, int32_t *out_data, size_t data_size); #ifdef __cplusplus }
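DoSqueeze can be this simple because squeezing only removes size-1 dimensions from the shape metadata; the flat element layout is untouched, so the kernel reduces to a guarded bulk copy. A sketch of that reasoning, assuming the same null-check-then-copy contract shown above:

    #include <cstring>

    int SqueezeCopy(const float *in, float *out, std::size_t bytes) {
      if (in == nullptr || out == nullptr) {
        return -1;
      }
      std::memcpy(out, in, bytes);  // same bytes, fewer (logical) dimensions
      return 0;
    }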
diff --git a/mindspore/lite/nnacl/transpose.c b/mindspore/lite/nnacl/transpose.c index 95186d398f..4dbe7a7d9e 100644 --- a/mindspore/lite/nnacl/transpose.c +++ b/mindspore/lite/nnacl/transpose.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -void TransposeDim2(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; @@ -33,7 +33,7 @@ void TransposeDim2(float *in_data, float *out_data, int *strides, int *out_strid } } -void TransposeDim3(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; @@ -56,7 +56,7 @@ void TransposeDim3(float *in_data, float *out_data, int *strides, int *out_strid } } -void TransposeDim4(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; @@ -88,7 +88,7 @@ void TransposeDim4(float *in_data, float *out_data, int *strides, int *out_strid } } -void TransposeDim5(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; @@ -127,7 +127,7 @@ void TransposeDim5(float *in_data, float *out_data, int *strides, int *out_strid } } -void TransposeDims(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end, int dims, int *size, int *position) { *(size + dims - 1) = 1; for (int i = dims - 1; i > 0; --i) { diff --git a/mindspore/lite/nnacl/transpose.h b/mindspore/lite/nnacl/transpose.h index edb2c7e921..753d9031f1 100644 --- a/mindspore/lite/nnacl/transpose.h +++ b/mindspore/lite/nnacl/transpose.h @@ -34,15 +34,15 @@ extern "C" { #endif int DoTranspose(float *in_data, float *out_data, int *input_shape, int *output_shape, TransposeParameter *transpose_param, int h_start, int h_end, int *size, int *position); -void TransposeDim2(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end); -void TransposeDim3(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end); -void TransposeDim4(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end); -void TransposeDim5(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end); -void TransposeDims(float *in_data, float *out_data, int *strides, int *out_strides, int *perm, int *output_shape, +void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, int h_start, int h_end, int dims, int *size, int *position); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/unstack.c b/mindspore/lite/nnacl/unstack.c index e8a9f909a8..dac3463659 100644 --- a/mindspore/lite/nnacl/unstack.c +++ b/mindspore/lite/nnacl/unstack.c @@ -17,7 +17,7 @@ #include "nnacl/unstack.h" #include <string.h> -void Unistack(float *input, float **output, UnstackParameter *para) { +void Unistack(const float *input, float **output, UnstackParameter *para) { for (int j = 0; j < para->num_; j++) { float *out_addr = output[j]; int out_offset = 0; diff --git a/mindspore/lite/nnacl/unstack.h b/mindspore/lite/nnacl/unstack.h index 957a9c91e9..a8dc8b5297 100644 --- a/mindspore/lite/nnacl/unstack.h +++ b/mindspore/lite/nnacl/unstack.h @@ -31,7 +31,7 @@ typedef struct UnstackParameter { #ifdef __cplusplus extern "C" { #endif -void Unistack(float *input, float **output, UnstackParameter *para); +void Unistack(const float *input, float **output, UnstackParameter *para); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/where.c b/mindspore/lite/nnacl/where.c index c1e8fce2d3..43617b755a 100644 --- a/mindspore/lite/nnacl/where.c +++ b/mindspore/lite/nnacl/where.c @@ -15,7 +15,7 @@ */ #include "nnacl/where.h" -void Where(bool *input, float *input1, float *input2, float *output, WhereParameter *where_param_, int task_id) { +void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id) { for (int i = task_id; i <
where_param_->number_; i += where_param_->op_parameter_.thread_num_) { if (input[where_param_->num_ > 1 ? i : 0] == true) { output[i] = input1[where_param_->num1_ > 1 ? i : 0]; diff --git a/mindspore/lite/nnacl/where.h b/mindspore/lite/nnacl/where.h index ffd2a5810e..33e4f5b73c 100644 --- a/mindspore/lite/nnacl/where.h +++ b/mindspore/lite/nnacl/where.h @@ -30,7 +30,7 @@ typedef struct WhereParameter { #ifdef __cplusplus extern "C" { #endif -void Where(bool *input, float *input1, float *input2, float *output, WhereParameter *where_param_, int task_id); +void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/winograd_utils.c b/mindspore/lite/nnacl/winograd_utils.c index 117c5a77da..f17d44323c 100644 --- a/mindspore/lite/nnacl/winograd_utils.c +++ b/mindspore/lite/nnacl/winograd_utils.c @@ -75,8 +75,8 @@ static OutputTransFunc OutputTransFuncRelu6List8[] = {NULL, OutputTransform8x6Relu6Unit, OutputTransform8x7Relu6Unit}; -void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, float *matrix_bt, int src_step, - int dst_step, int in_unit) { +void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt, + int src_step, int dst_step, int in_unit) { int len = in_unit * in_unit; if (len > MAX_LEN) return; #ifdef ENABLE_ARM @@ -113,7 +113,7 @@ void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *ma } void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a, - float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit) { + const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit) { int src_len = in_unit * in_unit; if (src_len > MAX_LEN) { return; diff --git a/mindspore/lite/nnacl/winograd_utils.h b/mindspore/lite/nnacl/winograd_utils.h index f9bdece471..bc6f29b697 100644 --- a/mindspore/lite/nnacl/winograd_utils.h +++ b/mindspore/lite/nnacl/winograd_utils.h @@ -33,11 +33,11 @@ typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_s typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, float *matrix_bt, int src_step, - int dst_step, int in_unit); +void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt, + int src_step, int dst_step, int in_unit); void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a, - float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); + const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); #define Load16Data \ src[0] = vld1q_f32(src_data + 0 * src_step); \ diff --git a/mindspore/lite/src/common/file_utils.cc b/mindspore/lite/src/common/file_utils.cc index 25f63c1b65..c32ecd9605 100644 --- a/mindspore/lite/src/common/file_utils.cc +++ b/mindspore/lite/src/common/file_utils.cc @@ -85,7 +85,7 @@ std::string RealPath(const char *path) { return res; } -int CompareOutputData(float *output_data, size_t output_size, float *correct_data, size_t data_size) { +int CompareOutputData(const float *output_data, size_t output_size, float *correct_data, size_t data_size) { if (output_size != data_size) { 
printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size); return 0; diff --git a/mindspore/lite/src/common/file_utils.h b/mindspore/lite/src/common/file_utils.h index d7a37e26f6..aadf7f6e5b 100644 --- a/mindspore/lite/src/common/file_utils.h +++ b/mindspore/lite/src/common/file_utils.h @@ -58,7 +58,7 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) { return 0; } -int CompareOutputData(float *output_data, size_t output_num, float *correct_data, size_t data_size); +int CompareOutputData(const float *output_data, size_t output_num, float *correct_data, size_t data_size); int CompareOutput(float *output_data, size_t output_num, std::string file_path); std::string GetAndroidPackageName(); diff --git a/mindspore/lite/src/common/file_utils_ext.cc b/mindspore/lite/src/common/file_utils_ext.cc index 49e5f7a369..c8110619ad 100644 --- a/mindspore/lite/src/common/file_utils_ext.cc +++ b/mindspore/lite/src/common/file_utils_ext.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace lite { -static float CompareOutputRelativeData(float *output_data, float *correct_data, int data_size) { +static float CompareOutputRelativeData(const float *output_data, float *correct_data, int data_size) { float error = 0; // relative error diff --git a/mindspore/lite/src/ops/batch_norm.cc b/mindspore/lite/src/ops/batch_norm.cc index 0b09798d1b..3374ef1123 100644 --- a/mindspore/lite/src/ops/batch_norm.cc +++ b/mindspore/lite/src/ops/batch_norm.cc @@ -45,6 +45,7 @@ int BatchNorm::UnPackAttr(const Primitive &prim, const std::vector & if (attr == nullptr) { MS_LOG(ERROR) << "new FusedBatchNormT failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } attr->epsilon = GetValue(prim.GetAttr("epsilon")); diff --git a/mindspore/lite/src/ops/fused_batchnorm.cc b/mindspore/lite/src/ops/fused_batchnorm.cc index 11054cfd6d..5e9a899002 100644 --- a/mindspore/lite/src/ops/fused_batchnorm.cc +++ b/mindspore/lite/src/ops/fused_batchnorm.cc @@ -46,6 +46,10 @@ int FusedBatchNorm::UnPackAttr(const Primitive &prim, const std::vectorprimitive_->value.value == nullptr) { auto attr = new (std::nothrow) schema::FusedBatchNormT(); + if (attr == nullptr) { + MS_LOG(ERROR) << "new attr value failed"; + return RET_ERROR; + } attr->epsilon = GetValue(prim.GetAttr("epsilon")); attr->momentum = GetValue(prim.GetAttr("momentum")); this->primitive_->value.value = attr; diff --git a/mindspore/lite/src/ops/instance_norm.cc b/mindspore/lite/src/ops/instance_norm.cc index 115ddf34fb..62d8cfc65e 100644 --- a/mindspore/lite/src/ops/instance_norm.cc +++ b/mindspore/lite/src/ops/instance_norm.cc @@ -46,6 +46,7 @@ int InstanceNorm::UnPackAttr(const Primitive &prim, const std::vectorprimitive_; + this->primitive_ = nullptr; return RET_ERROR; } attr->epsilon = GetValue(prim.GetAttr("epsilon")); diff --git a/mindspore/lite/src/ops/populate/layer_norm_populate.cc b/mindspore/lite/src/ops/populate/layer_norm_populate.cc index 336771aacb..74d94e1630 100644 --- a/mindspore/lite/src/ops/populate/layer_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/layer_norm_populate.cc @@ -33,6 +33,10 @@ OpParameter *PopulateLayerNormParameter(const mindspore::lite::PrimitiveC *primi auto normalized_shape = param->GetNormalizedShape(); layer_norm_parameter->normalized_dims_ = normalized_shape.size(); layer_norm_parameter->normalized_shape_ = reinterpret_cast(malloc(normalized_shape.size() * sizeof(int))); + if (layer_norm_parameter->normalized_shape_ == nullptr) { + MS_LOG(ERROR) << 
"malloc layer_norm_parameter->normalized_shape_ failed."; + return nullptr; + } for (size_t i = 0; i < normalized_shape.size(); i++) { layer_norm_parameter->normalized_shape_[i] = normalized_shape[i]; } diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc index ffbb17b692..991f99f2ee 100644 --- a/mindspore/lite/src/ops/resize.cc +++ b/mindspore/lite/src/ops/resize.cc @@ -54,11 +54,18 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector &inp } if (this->primitive_->value.value == nullptr) { auto attr = new (std::nothrow) schema::ResizeT(); + if (attr == nullptr) { + MS_LOG(ERROR) << "new attr value failed"; + return RET_ERROR; + } if (prim.instance_name() == "ResizeNearestNeighbor") { attr->method = schema::ResizeMethod_NEAREST; } else if (prim.instance_name() == "ResizeBilinear") { attr->method = schema::ResizeMethod_LINEAR; } else { + if (attr != nullptr) { + delete attr; + } MS_LOG(ERROR) << "wrong resize type"; return RET_ERROR; } @@ -69,6 +76,9 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector &inp this->primitive_->value.value = attr; if (this->primitive_->value.value == nullptr) { + if (attr != nullptr) { + delete attr; + } MS_LOG(ERROR) << "new primitiveT value failed"; return RET_ERROR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h index 89860337ba..95c5b7fa9e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/batch_to_space_base.h @@ -40,7 +40,7 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel { bool IsNoCrop() const { return no_crop_; } private: - bool no_crop_; + bool no_crop_ = false; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h index 0f5b6340b6..cd3b90b467 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/concat_base.h @@ -43,9 +43,9 @@ class ConcatBaseCPUKernel : public LiteKernel { int Run() override { return 0; } protected: - int axis_; - const InnerContext *ctx_; - int thread_count_; + int axis_ = 0; + const InnerContext *ctx_ = nullptr; + int thread_count_ = 1; ConcatParameter *concat_param_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h index 9857e73fd4..98747b5f5e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h @@ -60,11 +60,11 @@ class ConvolutionBaseCPUKernel : public LiteKernel { protected: void *bias_data_ = nullptr; - const InnerContext *ctx_; - ConvParameter *conv_param_; - ConvQuantArg *conv_quant_arg_; - int tile_num_; - int thread_count_; + const InnerContext *ctx_ = nullptr; + ConvParameter *conv_param_ = nullptr; + ConvQuantArg *conv_quant_arg_ = nullptr; + int tile_num_ = 0; + int thread_count_ = 1; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h index c48017a8d7..3f0010b744 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h @@ -38,8 +38,8 @@ class 
DetectionPostProcessBaseCPUKernel : public LiteKernel { int Run() override; protected: - float *input_boxes; - float *input_scores; + float *input_boxes = nullptr; + float *input_scores = nullptr; virtual int GetInputData() = 0; }; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h index f76263d8b2..86dfccce0d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/fullconnection_base.h @@ -40,10 +40,10 @@ class FullconnectionBaseCPUKernel : public LiteKernel { int Run() override { return 0; } protected: - MatMulParameter *fc_param_; - int thread_stride_; - const InnerContext *ctx_; - int thread_count_; + MatMulParameter *fc_param_ = nullptr; + int thread_stride_ = 0; + const InnerContext *ctx_ = nullptr; + int thread_count_ = 1; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h index e987c2fdbc..7942974c05 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/matmul_base.h @@ -40,10 +40,10 @@ class MatmulBaseCPUKernel : public LiteKernel { int Run() override { return 0; } protected: - MatMulParameter *params_; - int thread_stride_; - const InnerContext *ctx_; - int thread_count_; + MatMulParameter *params_ = nullptr; + int thread_stride_ = 0; + const InnerContext *ctx_ = nullptr; + int thread_count_ = 0; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc index fe4bce767f..7af0b332af 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc @@ -71,6 +71,7 @@ void PoolingBaseCPUKernel::FreeQuantParam() { } } free(pooling_quant_arg_); + pooling_quant_arg_ = nullptr; } } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h index 6ed3a47912..376b997e89 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h @@ -37,12 +37,12 @@ class ResizeBaseCPUKernel : public LiteKernel { int ReSize() override { return 0; }; protected: - int method_; - int64_t new_height_; - int64_t new_width_; - bool align_corners_; - bool preserve_aspect_ratio; - bool const_shape_; + int method_ = 0; + int64_t new_height_ = 0; + int64_t new_width_ = 0; + bool align_corners_ = false; + bool preserve_aspect_ratio = false; + bool const_shape_ = false; private: int CheckParameters(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h index 6f94d4a425..f1bfa9a7a0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h @@ -39,12 +39,12 @@ class SplitBaseCPUKernel : public LiteKernel { int Run() override { return 0; } protected: - const InnerContext *ctx_; - int thread_count_; - int thread_n_stride_; - int thread_n_num_; - int num_unit_; - SplitParameter *param; + const InnerContext *ctx_ = nullptr; + int thread_count_ = 1; + int thread_n_stride_ = 0; + int thread_n_num_ = 0; + int num_unit_ = 0; + SplitParameter *param = nullptr; }; } // namespace mindspore::kernel diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h index 4053783039..f855599f85 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h @@ -43,12 +43,12 @@ using mindspore::schema::PrimitiveType_Sub; namespace mindspore::kernel { class ArithmeticCPUKernel : public LiteKernel { - typedef int (*ArithmeticRun)(float *input0, float *input1, float *output, int element_size); - typedef int (*ArithmeticOptRun)(float *input0, float *input1, float *output, int element_size, - ArithmeticParameter *param); - typedef int (*ArithmeticIntRun)(int *input0, int *input1, int *output, int element_size); - typedef int (*ArithmeticOptIntRun)(int *input0, int *input1, int *output, int element_size, - ArithmeticParameter *param); + typedef int (*ArithmeticRun)(const float *input0, const float *input1, float *output, const int element_size); + typedef int (*ArithmeticOptRun)(const float *input0, const float *input1, float *output, const int element_size, + const ArithmeticParameter *param); + typedef int (*ArithmeticIntRun)(const int *input0, const int *input1, int *output, const int element_size); + typedef int (*ArithmeticOptIntRun)(const int *input0, const int *input1, int *output, const int element_size, + const ArithmeticParameter *param); public: ArithmeticCPUKernel(OpParameter *parameter, const std::vector &inputs, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h index 59827f5e96..e9ca8bcf75 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/constant_of_shape.h @@ -40,8 +40,8 @@ class ConstantOfShapeCPUKernel : public LiteKernel { int DoExecute(int task_id); private: - ConstantOfShapeParameter *param_; - void *out_ptr_; + ConstantOfShapeParameter *param_ = nullptr; + void *out_ptr_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc index af48215b6c..7602bbffec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc @@ -198,7 +198,12 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector(op_parameter); int out_channel = inputs.at(kWeightIndex)->Batch(); int new_in_channel = inputs.at(kWeightIndex)->Channel(); - int new_out_channel = out_channel / group; + int new_out_channel = 0; + if (group == 0) { + MS_LOG(ERROR) << "Divisor 'group' cannot be 0."; + } else { + new_out_channel = out_channel / group; + } int kernel_h = conv_param->kernel_h_; int kernel_w = conv_param->kernel_w_; int input_num = inputs.size(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h index 4c8b25975f..386358067c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution.h @@ -52,11 +52,11 @@ class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel { private: MatMulParameter *matmul_param_ = nullptr; - int input_plane_; - int kernel_plane_; - int output_plane_; - int thread_count_; - int thread_stride_; + int input_plane_ = 0; + int kernel_plane_ = 0; + int output_plane_ = 0; + int thread_count_ = 1; + int thread_stride_ = 0; float *weight_ptr_ = 
nullptr; float *pack_input_ = nullptr; float *pack_output_ = nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.h index a1a15e9148..9522438299 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.h @@ -58,15 +58,15 @@ class DeConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { void FreeRunBuf(); private: - DeConvParam *deconv_param_; + DeConvParam *deconv_param_ = nullptr; float *nhwc_input_ = nullptr; float *nhwc_output_ = nullptr; float *nc4hw4_output_ = nullptr; float *tile_input_ = nullptr; float *tile_output_ = nullptr; std::mutex lock_; - int thread_num_hw_; - int thread_stride_hw_; + int thread_num_hw_ = 0; + int thread_stride_hw_ = 0; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_DECONVOLUTION_WINOGRAD_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h index 3a3ccadda7..c382b84187 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu.h @@ -36,13 +36,13 @@ class EluCPUKernel : public LiteKernel { int DoExcute(int task_id); protected: - const lite::InnerContext *ctx_; - int thread_count_; - EluParameter *elu_parameter_; + const lite::InnerContext *ctx_ = nullptr; + int thread_count_ = 1; + EluParameter *elu_parameter_ = nullptr; private: - float *input_addr; - float *output_addr; + float *input_addr = nullptr; + float *output_addr = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h index 7c5e56c7bc..40f9b75d35 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup.h @@ -43,14 +43,14 @@ class EmbeddingLookupCPUKernel : public LiteKernel { int DoExcute(int task_id); protected: - const lite::InnerContext *ctx_; - int thread_count_; - EmbeddingLookupParameter *embedding_lookup_parameter_; + const lite::InnerContext *ctx_ = nullptr; + int thread_count_ = 1; + EmbeddingLookupParameter *embedding_lookup_parameter_ = nullptr; private: - float *input_addr_; - float *output_addr_; - int *ids_addr_; + float *input_addr_ = nullptr; + float *output_addr_ = nullptr; + int *ids_addr_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h index c21a0af52b..cc00b46ab9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp.h @@ -36,13 +36,13 @@ class ExpCPUKernel : public LiteKernel { int DoExcute(int task_id); protected: - const lite::InnerContext *ctx_; - int thread_count_; - ExpParameter *exp_parameter_; + const lite::InnerContext *ctx_ = nullptr; + int thread_count_ = 1; + ExpParameter *exp_parameter_ = nullptr; private: - float *input_addr_; - float *output_addr_; + float *input_addr_ = nullptr; + float *output_addr_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc index dfc4f91ccf..6f6cec20e9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection.cc @@ -64,6 +64,10 @@ int FullconnectionCPUKernel::ReSize() { if (in_tensors_.size() == 3) { int col_tmp = is_vector_input_ ? fc_param_->col_ : fc_param_->col_8_; bias_ptr_ = reinterpret_cast(malloc(col_tmp * sizeof(float))); + if (bias_ptr_ == nullptr) { + MS_LOG(ERROR) << "malloc bias_ptr_ failed"; + return RET_ERROR; + } memcpy(bias_ptr_, in_tensors_[2]->MutableData(), fc_param_->col_ * sizeof(float)); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h index bf2ad31571..4080927d02 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.h @@ -49,9 +49,9 @@ class PadCPUKernel : public LiteKernel { int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length); protected: - PadParameter *pad_param_; - int in_[4]; - int out_[4]; + PadParameter *pad_param_ = nullptr; + int in_[4] = {0}; + int out_[4] = {0}; }; int PadImpl(void *cdata, int task_id); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h index ea99002f10..8205a8a8b4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse.h @@ -46,16 +46,16 @@ class ReverseCPUKernel : public LiteKernel { int DoReverse(int task_id); private: - int thread_sz_count_; - int thread_sz_stride_; - int data_size_; - int strides_[REVERSE_STRIDE_MAX_SIZE]; - int inCount_[REVERSE_STRIDE_MAX_SIZE]; - int outCount_[REVERSE_STRIDE_MAX_SIZE]; - int thread_count_; + int thread_sz_count_ = 0; + int thread_sz_stride_ = 0; + int data_size_ = 0; + int strides_[REVERSE_STRIDE_MAX_SIZE] = {0}; + int inCount_[REVERSE_STRIDE_MAX_SIZE] = {0}; + int outCount_[REVERSE_STRIDE_MAX_SIZE] = {0}; + int thread_count_ = 1; int *tmp_ = nullptr; - float *in_ptr_; - float *out_ptr_; + float *in_ptr_ = nullptr; + float *out_ptr_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h index d5615bbb78..557604050f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling.h @@ -42,11 +42,11 @@ class ROIPoolingCPUKernel : public LiteKernel { int DoExecute(int task_id); private: - float *in_ptr_; - float *out_ptr_; - float *roi_ptr_; + float *in_ptr_ = nullptr; + float *out_ptr_ = nullptr; + float *roi_ptr_ = nullptr; float *max_c_ = nullptr; - ROIPoolingParameter *param_; + ROIPoolingParameter *param_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.h b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.h index 3513987ea2..f14b911425 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.h @@ -38,9 +38,9 @@ class SkipGramCPUKernel : public LiteKernel { int DoExcute(int task_id); protected: - const lite::InnerContext *ctx_; - int thread_count_; - SkipGramParameter *skip_gram_parameter_; + const lite::InnerContext *ctx_ = nullptr; + int thread_count_ = 1; + SkipGramParameter *skip_gram_parameter_ = nullptr; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h index 6436853cbd..cf23b56635 100644 --- 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
index 6436853cbd..cf23b56635 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax.h
@@ -40,8 +40,8 @@ class SoftmaxCPUKernel : public SoftmaxBaseCPUKernel {

  private:
   float *sum_data_ = nullptr;
-  int in_plane_size_;
-  int out_plane_size_;
+  int in_plane_size_ = 0;
+  int out_plane_size_ = 0;
 };
 }  // namespace mindspore::kernel

diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
index 4e243adbe5..9604614627 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
@@ -34,11 +34,11 @@ class SpaceToDepthCPUKernel : public LiteKernel {
   int Run() override;

  private:
-  int thread_h_stride_;
-  int thread_h_num_;
-  int num_unit_;
-  float *input_ptr_;
-  float *output_ptr_;
+  int thread_h_stride_ = 0;
+  int thread_h_num_ = 0;
+  int num_unit_ = 0;
+  float *input_ptr_ = nullptr;
+  float *output_ptr_ = nullptr;
 };
 }  // namespace mindspore::kernel

diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
index 0c844f1693..8dd17b2ec9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/split.h
@@ -36,7 +36,7 @@ class SplitCPUKernel : public SplitBaseCPUKernel {
   int Split(int task_id);

  private:
-  float *input_ptr_;
+  float *input_ptr_ = nullptr;
   std::vector<float *> output_ptr_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
index db42b29613..32ff146bac 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/stack.h
@@ -34,7 +34,7 @@ class StackCPUKernel : public LiteKernel {
   int Run() override;

  protected:
-  int axis_;
+  int axis_ = 0;
 };
 }  // namespace mindspore::kernel

diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
index 82e88bfbf8..7e4fb9eaf1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.cc
@@ -31,7 +31,7 @@ int TileCPUKernel::Init() {
   return ReSize();
 }

-void TileCPUKernel::ComputeStrides(int *shape, int *strides, int ndim) {
+void TileCPUKernel::ComputeStrides(const int *shape, int *strides, int ndim) {
   int stride = 1;
   for (int i = ndim - 1; i >= 0; i--) {
     strides[i] = stride;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
index 76fc5f32ca..02906ac809 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tile.h
@@ -34,7 +34,7 @@ class TileCPUKernel : public LiteKernel {
   int Run() override;

  private:
-  void ComputeStrides(int *shape, int *strides, int ndim);
+  void ComputeStrides(const int *shape, int *strides, int ndim);
 };
 }  // namespace mindspore::kernel

diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
index 3eaa4fc31c..5bb5c2fd5a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.cc
@@ -63,6 +63,7 @@ int TransposeCPUKernel::ReSize() {
   }
   if (this->out_shape_ != nullptr) {
     free(this->out_shape_);
+    this->out_shape_ = nullptr;
   }
   in_shape_ = reinterpret_cast<int *>(malloc(in_shape.size() * sizeof(int)));
   if (in_shape_ == nullptr) {
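ReSize() can run more than once over a kernel's lifetime, and the transpose.cc hunk closes a latent double-free: after free(out_shape_) the pointer still held its old value, so a later free or read would touch freed memory. Resetting it to nullptr makes repeated cleanup safe, since free(nullptr) is a no-op. The idiom in isolation, under a hypothetical ShapeBuffer holder:

  #include <cstdlib>

  struct ShapeBuffer {
    int *shape = nullptr;

    // Safe to call repeatedly: clearing the pointer after free() prevents
    // a dangling reference, and free(nullptr) is defined to do nothing.
    void Reallocate(int ndim) {
      free(shape);
      shape = nullptr;
      shape = static_cast<int *>(malloc(ndim * sizeof(int)));
    }
  };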
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
index 823ab97bf2..7c49497c32 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose.h
@@ -38,12 +38,12 @@ class TransposeCPUKernel : public LiteKernel {
   int TransposeParallel(int task_id);

  private:
-  int thread_num_;
-  int thread_h_stride_;
-  int thread_h_num_;
-  int num_unit_;
-  float *in_data_;
-  float *out_data_;
+  int thread_num_ = 1;
+  int thread_h_stride_ = 0;
+  int thread_h_num_ = 0;
+  int num_unit_ = 0;
+  float *in_data_ = nullptr;
+  float *out_data_ = nullptr;
   int *in_shape_ = nullptr;
   int *out_shape_ = nullptr;
   int *dim_size_ = nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
index 4c5d04925e..37a2995dc7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
@@ -24,7 +24,7 @@

 namespace mindspore::kernel {
 class ArithmeticSelfGradCPUKernel : public LiteKernel {
-  typedef int (*ArithmeticSelfGradOperation)(float *, float *, float *, int);
+  typedef int (*ArithmeticSelfGradOperation)(const float *, const float *, float *, const int);

  public:
   ArithmeticSelfGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
index 5efae074f4..9236f83c48 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc
@@ -200,7 +200,7 @@ int MirrorPadImplInt8(void *cdata, int task_id) {
   return RET_OK;
 }

-int PadInt8CPUKernel::CheckPaddings(int *paddings, int length, int *input_shape, int mode) {
+int PadInt8CPUKernel::CheckPaddings(const int *paddings, int length, int *input_shape, int mode) {
   if (paddings == nullptr || input_shape == nullptr) {
     return RET_NULL_PTR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
index 5508be0916..bb811ee94f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h
@@ -48,7 +48,7 @@ class PadInt8CPUKernel : public LiteKernel {

  private:
   int HandleMirrorPad();
-  int CheckPaddings(int *paddings, int length, int *input_shape, int mode);
+  int CheckPaddings(const int *paddings, int length, int *input_shape, int mode);
   int CopyPaddingFromInput();
   void CalculateStrides();
   int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);
@@ -56,8 +56,8 @@ class PadInt8CPUKernel : public LiteKernel {
   PadParameter *pad_param_ = nullptr;
   int8_t *in_data_ = nullptr;
   int8_t *out_data_ = nullptr;
-  int in_dims_[DEFAULT_PAD_NDIMS];
-  int out_dims_[DEFAULT_PAD_NDIMS];
+  int in_dims_[DEFAULT_PAD_NDIMS] = {0};
+  int out_dims_[DEFAULT_PAD_NDIMS] = {0};
 };
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_PAD_INT8_H_
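The remaining hunks are const-correctness fixes: inputs that an arithmetic or padding routine only reads are taken as const pointers, which documents intent, lets the compiler reject accidental writes, and allows callers to pass const-held buffers without a cast. A minimal sketch of the convention, with a toy element-wise op in the same function-pointer style as the typedefs above (names are illustrative, not the real operation table):

  // Read-only operands are const; only the output buffer is mutable.
  typedef int (*DemoElementOp)(const float *in0, const float *in1, float *out, int size);

  static int DemoElementAdd(const float *in0, const float *in1, float *out, int size) {
    for (int i = 0; i < size; ++i) {
      out[i] = in0[i] + in1[i];  // operands are only read, never written
    }
    return 0;
  }

Because DemoElementAdd matches the typedef exactly, a `DemoElementOp op = DemoElementAdd;` assignment compiles without a cast, which is the property the const changes preserve across the real kernels.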
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h
index 2949f32a2e..db76d4373e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h
@@ -46,7 +46,7 @@ class ScaleInt8CPUKernel : public LiteKernel {
   int8_t *scale_ = nullptr;
   int8_t *offset_ = nullptr;
   int8_t *output_ptr_ = nullptr;
-  bool has_bias_;
+  bool has_bias_ = false;
   ScaleParameter *scale_param_;

   int InitQuantArgs();