diff --git a/mindspore/lite/java/java/app/src/main/java/com/mindspore/lite/Model.java b/mindspore/lite/java/java/app/src/main/java/com/mindspore/lite/Model.java
index 4e2a959a45..0b80c1be3b 100644
--- a/mindspore/lite/java/java/app/src/main/java/com/mindspore/lite/Model.java
+++ b/mindspore/lite/java/java/app/src/main/java/com/mindspore/lite/Model.java
@@ -51,21 +51,21 @@ public class Model {
             ret = this.modelPtr != 0;
         } catch (IOException e) {
             this.modelPtr = 0;
-            Log.e("MS_LITE", "Load model failed: " + e.getMessage());
+            Log.e("MS_LITE", "Load model failed");
             ret = false;
         } finally {
             if (null != fis) {
                 try {
                     fis.close();
                 } catch (IOException e) {
-                    Log.e("MS_LITE", "Close file failed: " + e.getMessage());
+                    Log.e("MS_LITE", "Close file failed");
                 }
             }
             if (null != fileDescriptor) {
                 try {
                     fileDescriptor.close();
                 } catch (IOException e) {
-                    Log.e("MS_LITE", "Close fileDescriptor failed: " + e.getMessage());
+                    Log.e("MS_LITE", "Close fileDescriptor failed");
                 }
             }
         }
diff --git a/mindspore/lite/nnacl/fp32/matmul_fp32.c b/mindspore/lite/nnacl/fp32/matmul_fp32.c
index f05bed22c3..66846b9d82 100644
--- a/mindspore/lite/nnacl/fp32/matmul_fp32.c
+++ b/mindspore/lite/nnacl/fp32/matmul_fp32.c
@@ -24,9 +24,9 @@ void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col) {
   }
 }
 
-void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / 4;
       int cm8 = c % 4;
@@ -36,9 +36,9 @@ void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / 8;
       int cm8 = c % 8;
@@ -48,9 +48,9 @@ void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / C12NUM;
       int cm8 = c % C12NUM;
@@ -60,18 +60,18 @@ void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row_up_12 = UP_ROUND(row, C12NUM);
   size_t row12 = row / C12NUM * C12NUM;
   size_t col4 = col / C4NUM * C4NUM;
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row12; ri += C12NUM) {
     size_t ci = 0;
     for (; ci < col4; ci += C4NUM) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C12NUM;
 
       /* 12x4 row-major to col-major */
@@ -255,7 +255,7 @@ void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C12NUM;
       for (size_t i = 0; i < C12NUM; i++) {
         dst_c[i] = src_c[i * col];
@@ -282,7 +282,7 @@ void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   return;
 }
 
-void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row8 = row / C8NUM * C8NUM;
 #ifdef ENABLE_ARM64
   size_t col_skip = col / C8NUM * C8NUM;
@@ -291,14 +291,14 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   size_t col_skip = col / C4NUM * C4NUM;
   int skip_size = C4NUM;
 #endif
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row8; ri += C8NUM) {
     size_t ci = 0;
     for (; ci < col_skip; ci += skip_size) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C8NUM;
 
 #ifdef ENABLE_ARM64
@@ -459,7 +459,7 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C8NUM;
       for (size_t i = 0; i < C8NUM; i++) {
         dst_c[i] = src_c[i * col];
@@ -478,17 +478,17 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   return;
 }
 
-void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row8 = row / C4NUM * C4NUM;
   size_t col4 = col / C4NUM * C4NUM;
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row8; ri += C4NUM) {
     size_t ci = 0;
     for (; ci < col4; ci += C4NUM) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C4NUM;
 
       /* 4x4 row-major to col-major */
@@ -548,7 +548,7 @@ void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C4NUM;
       for (size_t i = 0; i < C4NUM; i++) {
         dst_c[i] = src_c[i * col];
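Note on the nnacl change above: the SIMD bodies of the column-major packers are elided by the hunks, but the scalar tail loops they keep show the layout. Below is a scalar sketch of the 4-row packer after the const change, covering full 4x4 tiles only; the NEON/SSE fast path and the trailing-row padding of the real function are omitted, and C4NUM is redeclared locally so the sketch stands alone.

#include <cstddef>

using std::size_t;

constexpr size_t C4NUM = 4;  // tile height, as in nnacl

// Scalar reference of RowMajor2Col4Major for full 4-row tiles.
void RowMajor2Col4MajorRef(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
  const size_t row4 = row / C4NUM * C4NUM;
  const float *src_r = src_ptr;  // read-only cursor, enabled by the const change
  float *dst_r = dst_ptr;
  for (size_t ri = 0; ri < row4; ri += C4NUM) {
    for (size_t ci = 0; ci < col; ci++) {
      const float *src_c = src_r + ci;
      float *dst_c = dst_r + ci * C4NUM;
      for (size_t i = 0; i < C4NUM; i++) {
        dst_c[i] = src_c[i * col];  // column ci of the 4-row tile, stored contiguously
      }
    }
    src_r += C4NUM * col;  // advance to the next 4 source rows
    dst_r += C4NUM * col;  // advance to the next packed tile
  }
}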
diff --git a/mindspore/lite/nnacl/fp32/matmul_fp32.h b/mindspore/lite/nnacl/fp32/matmul_fp32.h
index 07501f2014..ec78ed56b0 100644
--- a/mindspore/lite/nnacl/fp32/matmul_fp32.h
+++ b/mindspore/lite/nnacl/fp32/matmul_fp32.h
@@ -30,12 +30,12 @@ void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActT
                int col, size_t stride, int out_type);
 void MatVecMul(const float *a, const float *b, float *c, const float *bias, ActType act_type, int depth, int col);
 void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
-void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
-void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
 #ifdef ENABLE_ARM64
 void MatmulFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row,
                        int col, size_t stride, size_t writeNhwc, size_t WriteWino);
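With the prototypes const-qualified, callers holding read-only input no longer need a cast. A hypothetical wrapper (PackActivations is not a function in the tree, and the include path is assumed):

#include "nnacl/fp32/matmul_fp32.h"

// Before this patch, the call below needed const_cast<float *>(input) to
// match the old `float *src_ptr` prototype; now it compiles as-is.
void PackActivations(const float *input, float *packed, int row, int col) {
  RowMajor2Row12Major(input, packed, row, col);
}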
diff --git a/mindspore/lite/src/ops/scatter_nd.cc b/mindspore/lite/src/ops/scatter_nd.cc
index c89162fb2d..0767d19a70 100644
--- a/mindspore/lite/src/ops/scatter_nd.cc
+++ b/mindspore/lite/src/ops/scatter_nd.cc
@@ -55,6 +55,10 @@ int ScatterND::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
     return RET_ERROR;
   }
   auto output = outputs_.front();
+  if (output == nullptr) {
+    MS_LOG(ERROR) << "output is a null pointer.";
+    return RET_ERROR;
+  }
   output->set_data_type(update->data_type());
   output->set_format(update->format());
   if (!infer_flag()) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc
index f1e9a61da0..d816f4dadd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc
@@ -37,6 +37,7 @@ int PoolingBaseCPUKernel::SetQuantParam() {
   }
   pooling_quant_arg_[0] = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
   if (pooling_quant_arg_[0] == nullptr) {
+    pooling_quant_arg_[1] = nullptr;
     MS_LOG(ERROR) << "malloc pooling_quant_arg[0] failed.";
     free(pooling_quant_arg_);
     pooling_quant_arg_ = nullptr;
@@ -45,10 +46,10 @@ int PoolingBaseCPUKernel::SetQuantParam() {
   pooling_quant_arg_[1] = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
   if (pooling_quant_arg_[1] == nullptr) {
     MS_LOG(ERROR) << "malloc pooling_quant_arg[1] failed.";
-    free(*pooling_quant_arg_);
+    free(pooling_quant_arg_[0]);
+    pooling_quant_arg_[0] = nullptr;
     free(pooling_quant_arg_);
     pooling_quant_arg_ = nullptr;
-    *this->pooling_quant_arg_ = nullptr;
     return RET_MEMORY_FAILED;
   }
   auto *input_tensor = in_tensors_.at(kInputIndex);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.cc
index c1eed749e7..cdb3e95077 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.cc
@@ -118,7 +118,7 @@ int FullconnectionCPUKernel::Init() {
   return ReSize();
 }
 
-void FullconnectionCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
+void FullconnectionCPUKernel::InitMatrixA(const float *src_ptr, float *dst_ptr) {
   if (is_vector_input_) {
     memcpy(dst_ptr, src_ptr, fc_param_->deep_ * sizeof(float));
     return;
@@ -131,7 +131,7 @@ void FullconnectionCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
 #endif
 }
 
-void FullconnectionCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
+void FullconnectionCPUKernel::InitMatrixB(const float *src_ptr, float *dst_ptr) {
   if (is_vector_input_) {
     memcpy(dst_ptr, src_ptr, fc_param_->col_ * fc_param_->deep_ * sizeof(float));
     return;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h
index 7b08a9d968..8ebe1934d7 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h
@@ -43,8 +43,8 @@ class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
   void FreeBuf();
 
  private:
-  void InitMatrixA(float *src_ptr, float *dst_ptr);
-  void InitMatrixB(float *src_ptr, float *dst_ptr);
+  void InitMatrixA(const float *src_ptr, float *dst_ptr);
+  void InitMatrixB(const float *src_ptr, float *dst_ptr);
 
  private:
   float *a_pack_ptr_ = nullptr;
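The pooling fix corrects two things in the second error path: the first element is now freed through the clearer pooling_quant_arg_[0] rather than *pooling_quant_arg_, and the slot is cleared before the array that holds it is freed, instead of being written through a dangling pointer afterwards. A standalone sketch of that rollback order, with the kernel class and logging stripped; AllocQuantArgs is an illustrative name, not lite's API:

#include <cstdlib>

struct QuantArg {
  float scale_;
  int zp_;
};

constexpr int RET_OK = 0;
constexpr int RET_MEMORY_FAILED = -1;

// Allocates an array of two QuantArg pointers plus both elements, rolling
// back cleanly if any allocation fails.
int AllocQuantArgs(QuantArg **&quant_args) {
  quant_args = static_cast<QuantArg **>(malloc(2 * sizeof(QuantArg *)));
  if (quant_args == nullptr) {
    return RET_MEMORY_FAILED;
  }
  quant_args[0] = static_cast<QuantArg *>(malloc(sizeof(QuantArg)));
  if (quant_args[0] == nullptr) {
    quant_args[1] = nullptr;  // keep the array in a defined state before freeing it
    free(quant_args);
    quant_args = nullptr;
    return RET_MEMORY_FAILED;
  }
  quant_args[1] = static_cast<QuantArg *>(malloc(sizeof(QuantArg)));
  if (quant_args[1] == nullptr) {
    free(quant_args[0]);
    quant_args[0] = nullptr;  // clear the slot while the array is still alive
    free(quant_args);
    quant_args = nullptr;
    return RET_MEMORY_FAILED;
  }
  return RET_OK;
}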
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.cc
index f8a103bb4c..c1cfd09545 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.cc
@@ -167,14 +167,14 @@ int MatmulCPUKernel::ReSize() {
   return RET_OK;
 }
 
-void MatmulCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
+void MatmulCPUKernel::InitMatrixA(const float *src_ptr, float *dst_ptr) {
   if (is_vector_a_) {
     memcpy(dst_ptr, src_ptr, params_->batch * params_->deep_ * sizeof(float));
     return;
   }
 
   for (int i = 0; i < params_->batch; i++) {
-    float *src = src_ptr + i * params_->deep_ * params_->row_;
+    const float *src = src_ptr + i * params_->deep_ * params_->row_;
 #if defined(ENABLE_ARM32) || defined(ENABLE_X86_64_SSE)
     float *dst = dst_ptr + i * params_->deep_ * params_->row_4_;
     if (params_->a_transpose_) {
@@ -194,13 +194,13 @@ void MatmulCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
   return;
 }
 
-void MatmulCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
+void MatmulCPUKernel::InitMatrixB(const float *src_ptr, float *dst_ptr) {
   if (is_vector_a_) {
     if (params_->b_transpose_) {
       memcpy(dst_ptr, src_ptr, params_->batch * params_->col_ * params_->deep_ * sizeof(float));
     } else {
       for (int i = 0; i < params_->batch; i++) {
-        float *src = src_ptr + i * params_->deep_ * params_->col_;
+        const float *src = src_ptr + i * params_->deep_ * params_->col_;
         float *dst = dst_ptr + i * params_->deep_ * params_->col_;
         RowMajor2ColMajor(src, dst, params_->deep_, params_->col_);
       }
@@ -209,7 +209,7 @@ void MatmulCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
   }
 
   for (int i = 0; i < params_->batch; i++) {
-    float *src = src_ptr + i * params_->deep_ * params_->col_;
+    const float *src = src_ptr + i * params_->deep_ * params_->col_;
     float *dst = dst_ptr + i * params_->deep_ * params_->col_8_;
     if (params_->b_transpose_) {
       RowMajor2Col8Major(src, dst, params_->col_, params_->deep_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h
index f48e53d608..590e943e38 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h
@@ -40,8 +40,8 @@ class MatmulCPUKernel : public MatmulBaseCPUKernel {
   int MallocMatrixABuffer();
   int MallocMatrixBBuffer();
   int InitBias();
-  void InitMatrixA(float *src_ptr, float *dst_ptr);
-  void InitMatrixB(float *src_ptr, float *dst_ptr);
+  void InitMatrixA(const float *src_ptr, float *dst_ptr);
+  void InitMatrixB(const float *src_ptr, float *dst_ptr);
   void FreeTmpBuffer();
 
  private:
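The kernel-side InitMatrixA/InitMatrixB now take const sources, which lets them slice each batch read-only before handing it to the packers. A reduced sketch of the batched loop, showing only the ARM32/SSE branch visible in the hunk; MatMulParameterLite trims the real MatMulParameter to the fields used, and the transpose branch bodies are an assumption based on the packers above, not copied from the elided code:

#include <cstddef>

using std::size_t;

// Prototypes as patched in nnacl/fp32/matmul_fp32.h.
void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col);
void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);

struct MatMulParameterLite {
  int batch;
  int row_;
  int row_4_;  // row_ rounded up to a multiple of 4
  int deep_;
  bool a_transpose_;
};

void InitMatrixARef(const float *src_ptr, float *dst_ptr, const MatMulParameterLite *p) {
  for (int i = 0; i < p->batch; i++) {
    const float *src = src_ptr + i * p->deep_ * p->row_;  // read-only batch slice
    float *dst = dst_ptr + i * p->deep_ * p->row_4_;
    if (p->a_transpose_) {
      RowMajor2Row4Major(src, dst, p->deep_, p->row_);
    } else {
      RowMajor2Col4Major(src, dst, p->row_, p->deep_);
    }
  }
}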
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc
index dda5957e87..60e3679c4a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.cc
@@ -152,10 +152,11 @@ void Convolution1x1Int8CPUKernel::CheckSupportOptimize() {
   return;
 }
 
-int Convolution1x1Int8CPUKernel::InitBiasByzp(void *src_weight, int input_channel, int output_channel, int round_oc) {
+int Convolution1x1Int8CPUKernel::InitBiasByzp(const void *src_weight, int input_channel, int output_channel,
+                                              int round_oc) {
   /* bias = bias - v2 x zp1 + zp1 x zp2 */
   int32_t *bias_data = reinterpret_cast<int32_t *>(bias_data_);
-  int8_t *weight = reinterpret_cast<int8_t *>(src_weight);
+  auto *weight = static_cast<const int8_t *>(src_weight);
   int32_t input_zp = conv_param_->conv_quant_arg_.input_quant_args_[0].zp_;
   for (int oc = 0; oc < output_channel; oc++) {
     int32_t weight_sum_value = 0;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h
index 3eda1e9e7a..7ebe3d5b88 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h
@@ -62,7 +62,7 @@ class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel {
   int InitWeightBiasArm32();
   void Pre1x1Trans(int8_t *src_input, int8_t *src_output);
   void CheckSupportOptimize();
-  int InitBiasByzp(void *src_weight, int input_channel, int output_channel, int round_oc);
+  int InitBiasByzp(const void *src_weight, int input_channel, int output_channel, int round_oc);
 
  private:
   int32_t *input_sum_ = nullptr; /* per-oc */
diff --git a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc
index 159a22345b..cff856ba8f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc
@@ -50,6 +50,9 @@ int NormalizeCPUKernel::Init() {
 int NormalizeCPUKernel::ReSize() { return RET_OK; }
 
 std::string NormalizeCPUKernel::Trim(const std::string &str, const std::string &whitespace /*= " \t\n\v\f\r"*/) {
+  if (str.empty()) {
+    return "";
+  }
   auto begin = str.find_first_not_of(whitespace);
   auto end = str.find_last_not_of(whitespace);
   const auto range = end - begin + 1;
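The added guard covers the empty string, but an input that is entirely whitespace still leaves find_first_not_of at npos, so end - begin + 1 wraps around. A fuller version (a sketch, not what the patch does) folds both cases into a single check:

#include <string>

std::string Trim(const std::string &str, const std::string &whitespace = " \t\n\v\f\r") {
  const auto begin = str.find_first_not_of(whitespace);
  if (begin == std::string::npos) {
    return "";  // empty or all-whitespace input
  }
  const auto end = str.find_last_not_of(whitespace);  // cannot be npos here
  return str.substr(begin, end - begin + 1);
}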
LOG_ERROR("SortCpuProcessor failed"); + return NULL; + } } #endif ThreadPool *thread_pool = (struct ThreadPool *)(malloc(sizeof(ThreadPool)));