diff --git a/mindspore/lite/nnacl/fp16/conv_fp16.c b/mindspore/lite/nnacl/fp16/conv_fp16.c
index 6237e97f8b..04c69c7fb2 100644
--- a/mindspore/lite/nnacl/fp16/conv_fp16.c
+++ b/mindspore/lite/nnacl/fp16/conv_fp16.c
@@ -279,7 +279,7 @@ void ConvSWFp16(const float16_t *input_data, const float16_t *packed_weight, con
   bool relu6 = conv_param->act_type_ == ActType_Relu6;
   int oc4_res = conv_param->output_channel_ % C4NUM;
   const float16_t *src = input_data;
-  float16_t *dst;
+  float16_t *dst = NULL;
   if (oc4_res == 0) {
     dst = output_data;
   } else {
diff --git a/mindspore/lite/nnacl/fp32/common_func.c b/mindspore/lite/nnacl/fp32/common_func.c
index 188f39b804..fb0286000f 100644
--- a/mindspore/lite/nnacl/fp32/common_func.c
+++ b/mindspore/lite/nnacl/fp32/common_func.c
@@ -17,8 +17,14 @@
 #include "nnacl/fp32/common_func.h"
 void PostConvFuncComm(const float *src_ptr_, float *out_ptr, const float *bias_ptr, size_t output_channel,
                       size_t plane_size, size_t stride, bool is_relu, bool is_relu6, int size) {
+  int oc_div = 0, oc_mod = 0;
   for (int oc = 0; oc < output_channel; oc++) {
-    int oc_div = oc / size, oc_mod = oc % size;
+    if (size != 0) {
+      oc_div = oc / size;
+      oc_mod = oc % size;
+    } else {
+      return;
+    }
     for (int hw = 0; hw < plane_size; hw++) {
       int src_index = oc_div * size * plane_size + hw * size + oc_mod;
       int dst_index = hw * stride + oc;
diff --git a/mindspore/lite/nnacl/fp32/roi_pooling.c b/mindspore/lite/nnacl/fp32/roi_pooling.c
index fd79caa14e..1f4f1b9839 100644
--- a/mindspore/lite/nnacl/fp32/roi_pooling.c
+++ b/mindspore/lite/nnacl/fp32/roi_pooling.c
@@ -35,7 +35,7 @@ int ROIPooling(float *in_ptr, float *out_ptr, float *roi, int tid, ROIPoolingPar
   int scale = param->scale_;
   int pooled_height = param->pooledH_;
   int pooled_width = param->pooledW_;
-  int roi_stride = 5;
+  const int roi_stride = 5;
   int roi_ind_st = roi_st * roi_stride;
   float *max_c = malloc(channels_ * sizeof(float));
   for (int i = roi_st; i < roi_end; ++i) {
diff --git a/mindspore/lite/nnacl/int8/leaky_relu_int8.c b/mindspore/lite/nnacl/int8/leaky_relu_int8.c
index ced18e6fbe..540a7f898a 100644
--- a/mindspore/lite/nnacl/int8/leaky_relu_int8.c
+++ b/mindspore/lite/nnacl/int8/leaky_relu_int8.c
@@ -17,6 +17,9 @@
 #include "nnacl/int8/leaky_relu_int8.h"
 
 void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
+  if (quant_prelu_parm == NULL) {
+    return;
+  }
   float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
   int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
   const float output_inverse_scale = 1.f / output_scale;
diff --git a/mindspore/lite/nnacl/int8/matmul_int8.c b/mindspore/lite/nnacl/int8/matmul_int8.c
index 1135cc5e09..26aa3269df 100644
--- a/mindspore/lite/nnacl/int8/matmul_int8.c
+++ b/mindspore/lite/nnacl/int8/matmul_int8.c
@@ -328,7 +328,7 @@ void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weig
      }
    }
    dst[c] = row * input_zp * weight_zp - input_zp * sum;
-    if (bias) {
+    if (bias != NULL) {
      dst[c] += bias[c];
    }
  }
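
The `PostConvFuncComm` hunk above guards the `oc / size` and `oc % size` divisions against `size == 0`. A minimal standalone C sketch of the same guard pattern, hoisting the single check ahead of the loop instead of re-testing it on every iteration (the function `split_channels` and its arguments are illustrative, not from the patch):

#include <stdio.h>

/* Sketch of the guard in PostConvFuncComm: refuse a zero divisor once,
 * up front, so the divisions inside the loop are always defined. */
static void split_channels(const int *oc_indices, int count, int size) {
  if (size == 0) {
    return; /* same early exit the patch takes, hoisted out of the loop */
  }
  for (int i = 0; i < count; i++) {
    int oc_div = oc_indices[i] / size;
    int oc_mod = oc_indices[i] % size;
    printf("oc=%d -> div=%d mod=%d\n", oc_indices[i], oc_div, oc_mod);
  }
}

int main(void) {
  int ocs[] = {0, 1, 5, 9};
  split_channels(ocs, 4, 4); /* normal case */
  split_channels(ocs, 4, 0); /* guarded case: returns without dividing */
  return 0;
}
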
diff --git a/mindspore/lite/src/ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc
index 38151eef0f..7c9c32058f 100644
--- a/mindspore/lite/src/ops/conv2d.cc
+++ b/mindspore/lite/src/ops/conv2d.cc
@@ -218,8 +218,8 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
 }
 
 void Conv2D::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
-  constexpr float qmin = 0;
-  constexpr float qmax = 255;
+  const float qmin = 0;
+  const float qmax = 255;
   *mMin = static_cast<float>((qmin - mean) / stdDev);
   *mMax = static_cast<float>((qmax - mean) / stdDev);
 }
diff --git a/mindspore/lite/src/ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc
index a66e33f82a..ba318a1bb1 100644
--- a/mindspore/lite/src/ops/depthwise_conv2d.cc
+++ b/mindspore/lite/src/ops/depthwise_conv2d.cc
@@ -70,8 +70,8 @@ void DepthwiseConv2D::SetActivationType(int activation_type) {
 }
 
 void DepthwiseConv2D::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
-  constexpr float qmin = 0;
-  constexpr float qmax = 255;
+  const float qmin = 0;
+  const float qmax = 255;
   *mMin = static_cast<float>((qmin - mean) / stdDev);
   *mMax = static_cast<float>((qmax - mean) / stdDev);
 }
diff --git a/mindspore/lite/src/ops/matmul.cc b/mindspore/lite/src/ops/matmul.cc
index 815eaa24a5..c12038312c 100644
--- a/mindspore/lite/src/ops/matmul.cc
+++ b/mindspore/lite/src/ops/matmul.cc
@@ -31,8 +31,8 @@ void MatMul::SetTransposeA(bool transpose_a) { this->primitive_->value.AsMatMul(
 void MatMul::SetTransposeB(bool transpose_b) { this->primitive_->value.AsMatMul()->transposeB = transpose_b; }
 
 void MatMul::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
-  constexpr float qmin = 0;
-  constexpr float qmax = 255;
+  const float qmin = 0;
+  const float qmax = 255;
   *mMin = static_cast<float>((qmin - mean) / stdDev);
   *mMax = static_cast<float>((qmax - mean) / stdDev);
 }
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
index aeb3209fc1..93b67fba4e 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc
@@ -119,8 +119,8 @@ int ToFormatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size
     im_dst_x = w * UP_DIV(c, C4NUM);
     im_dst_y = h;
   } else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) {
-    int h = 1;
-    int w = 1;
+    const int h = 1;
+    const int w = 1;
     int c = shapex[1];
     im_dst_x = w * UP_DIV(c, C4NUM);
     im_dst_y = h;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc
index 2de2122fef..51d13c39c5 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.cc
@@ -109,7 +109,7 @@ int SubGraphOpenCLKernel::GenToFormatOp(const std::vector<lite::Tensor *> &in_te
     parameter->dst_format = dst_format;
     parameter->out_mem_type = mem_type;
     out_parameters->emplace_back(parameter);
-    LiteKernel *in_convert_op;
+    LiteKernel *in_convert_op = nullptr;
     if (mem_type == OpenCLMemType::IMG) {
       in_convert_op = lite::GetOpenCLKernel({in_tensors[i]}, {new_tensor}, reinterpret_cast<OpParameter *>(parameter),
                                             nullptr, desc);
diff --git a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
index 165ffe9f23..891b69d9f0 100644
--- a/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
+++ b/mindspore/lite/src/runtime/opencl/opencl_runtime.cc
@@ -198,7 +198,7 @@ int OpenCLRuntime::Init() {
   MS_LOG(INFO) << "Compute Unit: " << compute_units_;
   MS_LOG(INFO) << "Clock Frequency: " << max_freq_ << " MHz";
 
-  cl_command_queue_properties properties = 0;
+  const cl_command_queue_properties properties = 0;
 #if MS_OPENCL_PROFILE
   properties |= CL_QUEUE_PROFILING_ENABLE;
 #endif
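
The three `CalQuantParam` hunks above share one computation: mapping the uint8 quantization range [0, 255] to float min/max bounds via the given mean and standard deviation. A minimal C sketch of that arithmetic, with a symmetric worked example (the helper name `cal_quant_param` is made up for illustration):

#include <stdio.h>

/* Same arithmetic as the patched CalQuantParam methods: map the uint8
 * quantized range [qmin, qmax] = [0, 255] to float bounds using the
 * given mean and standard deviation. */
static void cal_quant_param(double mean, double std_dev, float *m_min, float *m_max) {
  const float qmin = 0;
  const float qmax = 255;
  *m_min = (float)((qmin - mean) / std_dev);
  *m_max = (float)((qmax - mean) / std_dev);
}

int main(void) {
  float m_min = 0.0f;
  float m_max = 0.0f;
  /* With mean = stdDev = 127.5 the bounds come out symmetric: [-1, 1]. */
  cal_quant_param(127.5, 127.5, &m_min, &m_max);
  printf("mMin=%f mMax=%f\n", m_min, m_max);
  return 0;
}
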
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
index 1327074f11..8468102cda 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
@@ -402,6 +402,7 @@ STATUS OnnxModelParser::CopyOnnxTensorData(const onnx::TensorProto &onnx_const_v
       data_size = data_count * sizeof(int32_t);
       buffer = std::make_unique<int32_t[]>(data_count);
       const int64_t *in_data;
+      in_data = nullptr;
       if (onnx_const_value.int64_data_size() == 0) {
         in_data = reinterpret_cast<const int64_t *>(onnx_const_value.raw_data().data());
       } else {
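
The parser hunk above sets `in_data` to `nullptr` before one of two sources (the raw byte buffer or the repeated int64 field) is selected, so a missed branch can no longer leave the pointer uninitialized. A standalone C sketch of the surrounding int64-to-int32 narrowing copy, under the assumption that both candidate sources hold `int64_t` values (the helper `copy_int64_as_int32` and its failure handling are illustrative, not from the parser):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative version of the int64 -> int32 narrowing copy in
 * CopyOnnxTensorData: the source pointer starts as NULL (nullptr in the
 * patch) so a failed selection is detectable rather than being an
 * uninitialized read. */
static int32_t *copy_int64_as_int32(const int64_t *raw, const int64_t *repeated, size_t count) {
  const int64_t *in_data = NULL; /* mirrors the patched initialization */
  in_data = (raw != NULL) ? raw : repeated;
  if (in_data == NULL) {
    return NULL; /* neither source was provided */
  }
  int32_t *buffer = malloc(count * sizeof(int32_t));
  if (buffer == NULL) {
    return NULL;
  }
  for (size_t i = 0; i < count; i++) {
    buffer[i] = (int32_t)in_data[i]; /* narrowing, as the parser does */
  }
  return buffer;
}

int main(void) {
  const int64_t src[] = {1, 2, 3};
  int32_t *dst = copy_int64_as_int32(src, NULL, 3);
  if (dst != NULL) {
    printf("%" PRId32 " %" PRId32 " %" PRId32 "\n", dst[0], dst[1], dst[2]);
    free(dst);
  }
  return 0;
}
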