!9060 [MSLITE][Develop] fix code review

From: @sunsuodong
Reviewed-by: @zhang_xue_tong, @zhanghaibo5
Signed-off-by: @zhang_xue_tong
pull/9060/MERGE
Committed by mindspore-ci-bot (via Gitee, 4 years ago)
commit f8abc07891

@@ -51,21 +51,21 @@ public class Model {
             ret = this.modelPtr != 0;
         } catch (IOException e) {
             this.modelPtr = 0;
-            Log.e("MS_LITE", "Load model failed: " + e.getMessage());
+            Log.e("MS_LITE", "Load model failed");
             ret = false;
         } finally {
             if (null != fis) {
                 try {
                     fis.close();
                 } catch (IOException e) {
-                    Log.e("MS_LITE", "Close file failed: " + e.getMessage());
+                    Log.e("MS_LITE", "Close file failed");
                 }
             }
             if (null != fileDescriptor) {
                 try {
                     fileDescriptor.close();
                 } catch (IOException e) {
-                    Log.e("MS_LITE", "Close fileDescriptor failed: " + e.getMessage());
+                    Log.e("MS_LITE", "Close fileDescriptor failed");
                 }
             }
         }

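The hunk above trims the exception detail out of the Android log messages and closes both handles defensively in the finally block. In the C++ parts of the runtime the same cleanup concern is usually handled with RAII; a minimal sketch under that assumption, with illustrative names (LoadModelFile is not a MindSpore API):

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

bool LoadModelFile(const std::string &path, std::vector<char> *buf) {
  // The ifstream destructor closes the file on every return path.
  std::ifstream fis(path, std::ios::binary | std::ios::ate);
  if (!fis.is_open()) {
    // Log a fixed message; avoid echoing system error detail, as the hunk above does.
    std::cerr << "Load model failed" << std::endl;
    return false;
  }
  std::streamsize size = fis.tellg();
  fis.seekg(0, std::ios::beg);
  buf->resize(static_cast<size_t>(size));
  return static_cast<bool>(fis.read(buf->data(), size));
}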
@@ -24,9 +24,9 @@ void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col) {
   }
 }
 
-void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / 4;
       int cm8 = c % 4;
@@ -36,9 +36,9 @@ void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / 8;
       int cm8 = c % 8;
@@ -48,9 +48,9 @@ void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col) {
+void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col) {
   for (int r = 0; r < row; r++) {
-    float *src = src_ptr + r * col;
+    const float *src = src_ptr + r * col;
     for (int c = 0; c < col; c++) {
       int cd8 = c / C12NUM;
       int cm8 = c % C12NUM;
@@ -60,18 +60,18 @@ void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col) {
   return;
 }
 
-void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row_up_12 = UP_ROUND(row, C12NUM);
   size_t row12 = row / C12NUM * C12NUM;
   size_t col4 = col / C4NUM * C4NUM;
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row12; ri += C12NUM) {
     size_t ci = 0;
     for (; ci < col4; ci += C4NUM) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C12NUM;
 
       /* 12x4 row-major to col-major */
@@ -255,7 +255,7 @@ void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C12NUM;
       for (size_t i = 0; i < C12NUM; i++) {
         dst_c[i] = src_c[i * col];
@@ -282,7 +282,7 @@ void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   return;
 }
 
-void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row8 = row / C8NUM * C8NUM;
 #ifdef ENABLE_ARM64
   size_t col_skip = col / C8NUM * C8NUM;
@@ -291,14 +291,14 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   size_t col_skip = col / C4NUM * C4NUM;
   int skip_size = C4NUM;
 #endif
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row8; ri += C8NUM) {
     size_t ci = 0;
     for (; ci < col_skip; ci += skip_size) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C8NUM;
 
 #ifdef ENABLE_ARM64
@@ -459,7 +459,7 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C8NUM;
       for (size_t i = 0; i < C8NUM; i++) {
         dst_c[i] = src_c[i * col];
@@ -478,17 +478,17 @@ void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
   return;
 }
 
-void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col) {
+void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col) {
   size_t row8 = row / C4NUM * C4NUM;
   size_t col4 = col / C4NUM * C4NUM;
-  float *src_r = src_ptr;
+  const float *src_r = src_ptr;
   float *dst_r = dst_ptr;
 
   size_t ri = 0;
   for (; ri < row8; ri += C4NUM) {
     size_t ci = 0;
     for (; ci < col4; ci += C4NUM) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C4NUM;
 
       /* 4x4 row-major to col-major */
@@ -548,7 +548,7 @@ void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col)
 #endif
     }
     for (; ci < col; ci++) {
-      float *src_c = src_r + ci;
+      const float *src_c = src_r + ci;
       float *dst_c = dst_r + ci * C4NUM;
       for (size_t i = 0; i < C4NUM; i++) {
         dst_c[i] = src_c[i * col];

@@ -30,12 +30,12 @@ void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActT
                int col, size_t stride, int out_type);
 void MatVecMul(const float *a, const float *b, float *c, const float *bias, ActType act_type, int depth, int col);
 void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row4Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row8Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Row12Major(float *src_ptr, float *dst_ptr, int row, int col);
-void RowMajor2Col4Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
-void RowMajor2Col8Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
-void RowMajor2Col12Major(float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col);
+void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
+void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col);
 #ifdef ENABLE_ARM64
 void MatmulFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row,
                        int col, size_t stride, size_t writeNhwc, size_t WriteWino);

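The matmul hunks above const-qualify every source pointer that is only read, in both the definitions and the header declarations, so the compiler now enforces the read-only contract instead of relying on convention. A small self-contained sketch of the rule; RowMajor2ColMajorSketch is an illustrative name, not the real kernel:

// The source buffer is read-only, so the parameter is const-qualified and
// any pointer derived from it stays const too.
void RowMajor2ColMajorSketch(const float *src, float *dst, int row, int col) {
  for (int r = 0; r < row; ++r) {
    const float *src_row = src + r * col;  // derived pointer keeps the qualifier
    for (int c = 0; c < col; ++c) {
      dst[c * row + r] = src_row[c];
      // src_row[c] = 0.0f;  // would now fail to compile: assignment of read-only location
    }
  }
}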
@@ -55,6 +55,10 @@ int ScatterND::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o
     return RET_ERROR;
   }
   auto output = outputs_.front();
+  if (output == nullptr) {
+    MS_LOG(ERROR) << "output null pointer dereferencing.";
+    return RET_ERROR;
+  }
   output->set_data_type(update->data_type());
   output->set_format(update->format());
   if (!infer_flag()) {

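The guard added above rejects a null output tensor before the set_data_type / set_format calls dereference it, returning an error code instead of crashing. A stripped-down sketch of the same check, with simplified stand-in types for lite::Tensor and the RET_* codes:

struct Tensor { int data_type; };
constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

int InferShapeSketch(Tensor *output, const Tensor &update) {
  if (output == nullptr) {
    return RET_ERROR;  // report the failure instead of dereferencing null
  }
  output->data_type = update.data_type;
  return RET_OK;
}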
@@ -37,6 +37,7 @@ int PoolingBaseCPUKernel::SetQuantParam() {
   }
   pooling_quant_arg_[0] = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
   if (pooling_quant_arg_[0] == nullptr) {
+    pooling_quant_arg_[1] = nullptr;
     MS_LOG(ERROR) << "malloc pooling_quant_arg[0] failed.";
     free(pooling_quant_arg_);
     pooling_quant_arg_ = nullptr;
@@ -45,10 +46,10 @@ int PoolingBaseCPUKernel::SetQuantParam() {
   pooling_quant_arg_[1] = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
   if (pooling_quant_arg_[1] == nullptr) {
     MS_LOG(ERROR) << "malloc pooling_quant_arg[1] failed.";
-    free(*pooling_quant_arg_);
+    free(pooling_quant_arg_[0]);
     free(pooling_quant_arg_);
+    pooling_quant_arg_[0] = nullptr;
     pooling_quant_arg_ = nullptr;
-    *this->pooling_quant_arg_ = nullptr;
     return RET_MEMORY_FAILED;
   }
   auto *input_tensor = in_tensors_.at(kInputIndex);

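The pooling hunk repairs the failure path for the second QuantArg allocation: the old code wrote through this->pooling_quant_arg_ after the container had already been freed and nulled. A self-contained sketch of the intended cleanup discipline (free the sibling allocation, null it while the container is still alive, then release the container); QuantArg's fields and the function name are simplified stand-ins:

#include <cstdlib>

struct QuantArg { int zp; double scale; };

int SetQuantParamSketch(QuantArg ***quant_arg_out) {
  QuantArg **qa = static_cast<QuantArg **>(malloc(2 * sizeof(QuantArg *)));
  if (qa == nullptr) return -1;
  qa[0] = static_cast<QuantArg *>(malloc(sizeof(QuantArg)));
  if (qa[0] == nullptr) {
    qa[1] = nullptr;  // leave no indeterminate slot behind
    free(qa);
    return -1;
  }
  qa[1] = static_cast<QuantArg *>(malloc(sizeof(QuantArg)));
  if (qa[1] == nullptr) {
    free(qa[0]);      // release the sibling allocation first
    qa[0] = nullptr;  // null it while the container is still alive
    free(qa);
    return -1;
  }
  *quant_arg_out = qa;
  return 0;
}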
@@ -118,7 +118,7 @@ int FullconnectionCPUKernel::Init() {
   return ReSize();
 }
 
-void FullconnectionCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
+void FullconnectionCPUKernel::InitMatrixA(const float *src_ptr, float *dst_ptr) {
   if (is_vector_input_) {
     memcpy(dst_ptr, src_ptr, fc_param_->deep_ * sizeof(float));
     return;
@@ -131,7 +131,7 @@ void FullconnectionCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
 #endif
 }
 
-void FullconnectionCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
+void FullconnectionCPUKernel::InitMatrixB(const float *src_ptr, float *dst_ptr) {
   if (is_vector_input_) {
     memcpy(dst_ptr, src_ptr, fc_param_->col_ * fc_param_->deep_ * sizeof(float));
     return;

@@ -43,8 +43,8 @@ class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
   void FreeBuf();
 
  private:
-  void InitMatrixA(float *src_ptr, float *dst_ptr);
-  void InitMatrixB(float *src_ptr, float *dst_ptr);
+  void InitMatrixA(const float *src_ptr, float *dst_ptr);
+  void InitMatrixB(const float *src_ptr, float *dst_ptr);
 
  private:
   float *a_pack_ptr_ = nullptr;

@@ -167,14 +167,14 @@ int MatmulCPUKernel::ReSize() {
   return RET_OK;
 }
 
-void MatmulCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
+void MatmulCPUKernel::InitMatrixA(const float *src_ptr, float *dst_ptr) {
   if (is_vector_a_) {
     memcpy(dst_ptr, src_ptr, params_->batch * params_->deep_ * sizeof(float));
     return;
   }
 
   for (int i = 0; i < params_->batch; i++) {
-    float *src = src_ptr + i * params_->deep_ * params_->row_;
+    const float *src = src_ptr + i * params_->deep_ * params_->row_;
 #if defined(ENABLE_ARM32) || defined(ENABLE_X86_64_SSE)
     float *dst = dst_ptr + i * params_->deep_ * params_->row_4_;
     if (params_->a_transpose_) {
@@ -194,13 +194,13 @@ void MatmulCPUKernel::InitMatrixA(float *src_ptr, float *dst_ptr) {
   return;
 }
 
-void MatmulCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
+void MatmulCPUKernel::InitMatrixB(const float *src_ptr, float *dst_ptr) {
   if (is_vector_a_) {
     if (params_->b_transpose_) {
       memcpy(dst_ptr, src_ptr, params_->batch * params_->col_ * params_->deep_ * sizeof(float));
     } else {
       for (int i = 0; i < params_->batch; i++) {
-        float *src = src_ptr + i * params_->deep_ * params_->col_;
+        const float *src = src_ptr + i * params_->deep_ * params_->col_;
         float *dst = dst_ptr + i * params_->deep_ * params_->col_;
         RowMajor2ColMajor(src, dst, params_->deep_, params_->col_);
       }
@@ -209,7 +209,7 @@ void MatmulCPUKernel::InitMatrixB(float *src_ptr, float *dst_ptr) {
   }
 
   for (int i = 0; i < params_->batch; i++) {
-    float *src = src_ptr + i * params_->deep_ * params_->col_;
+    const float *src = src_ptr + i * params_->deep_ * params_->col_;
     float *dst = dst_ptr + i * params_->deep_ * params_->col_8_;
     if (params_->b_transpose_) {
       RowMajor2Col8Major(src, dst, params_->col_, params_->deep_);

@@ -40,8 +40,8 @@ class MatmulCPUKernel : public MatmulBaseCPUKernel {
   int MallocMatrixABuffer();
   int MallocMatrixBBuffer();
   int InitBias();
-  void InitMatrixA(float *src_ptr, float *dst_ptr);
-  void InitMatrixB(float *src_ptr, float *dst_ptr);
+  void InitMatrixA(const float *src_ptr, float *dst_ptr);
+  void InitMatrixB(const float *src_ptr, float *dst_ptr);
   void FreeTmpBuffer();
 
  private:

@@ -152,10 +152,11 @@ void Convolution1x1Int8CPUKernel::CheckSupportOptimize() {
   return;
 }
 
-int Convolution1x1Int8CPUKernel::InitBiasByzp(void *src_weight, int input_channel, int output_channel, int round_oc) {
+int Convolution1x1Int8CPUKernel::InitBiasByzp(const void *src_weight, int input_channel, int output_channel,
+                                              int round_oc) {
   /* bias = bias - v2 x zp1 + zp1 x zp2 */
   int32_t *bias_data = reinterpret_cast<int32_t *>(bias_data_);
-  int8_t *weight = reinterpret_cast<int8_t *>(src_weight);
+  auto *weight = static_cast<const int8_t *>(src_weight);
   int32_t input_zp = conv_param_->conv_quant_arg_.input_quant_args_[0].zp_;
   for (int oc = 0; oc < output_channel; oc++) {
     int32_t weight_sum_value = 0;

@@ -62,7 +62,7 @@ class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel {
   int InitWeightBiasArm32();
   void Pre1x1Trans(int8_t *src_input, int8_t *src_output);
   void CheckSupportOptimize();
-  int InitBiasByzp(void *src_weight, int input_channel, int output_channel, int round_oc);
+  int InitBiasByzp(const void *src_weight, int input_channel, int output_channel, int round_oc);
 
  private:
   int32_t *input_sum_ = nullptr; /* per-oc */

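Once src_weight is a const void *, the conversion has to keep the qualifier: static_cast<const int8_t *> does, while the old reinterpret_cast to a non-const int8_t * would no longer even compile, since reinterpret_cast cannot cast away constness. A minimal illustration (SumWeights is hypothetical, not the kernel's code):

#include <cstdint>

int32_t SumWeights(const void *src_weight, int count) {
  // const-preserving cast from the opaque buffer to the element type
  const int8_t *weight = static_cast<const int8_t *>(src_weight);
  int32_t sum = 0;
  for (int i = 0; i < count; ++i) {
    sum += weight[i];
  }
  return sum;
}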
@@ -50,6 +50,9 @@ int NormalizeCPUKernel::Init() {
 int NormalizeCPUKernel::ReSize() { return RET_OK; }
 
 std::string NormalizeCPUKernel::Trim(const std::string &str, const std::string &whitespace /*= " \t\n\v\f\r"*/) {
+  if (str.empty()) {
+    return "";
+  }
   auto begin = str.find_first_not_of(whitespace);
   auto end = str.find_last_not_of(whitespace);
   const auto range = end - begin + 1;

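The empty() guard above keeps Trim from computing a range on an empty string, where find_first_not_of returns npos and the later substr would throw. A self-contained sketch in the same spirit, which also bails out on all-whitespace input (TrimSketch is illustrative, not the kernel method):

#include <string>

std::string TrimSketch(const std::string &str,
                       const std::string &whitespace = " \t\n\v\f\r") {
  if (str.empty()) {
    return "";
  }
  auto begin = str.find_first_not_of(whitespace);
  if (begin == std::string::npos) {
    return "";  // all-whitespace input, nothing to keep
  }
  auto end = str.find_last_not_of(whitespace);  // cannot be npos here
  return str.substr(begin, end - begin + 1);
}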
@@ -303,7 +303,7 @@ void PrintTensor(const lite::Tensor *tensor, MemType mem_type, int n, const std:
   printf("\n");
 
   if (!out_file.empty()) {
-    WriteToBin(out_file, data.data(), data.size());
+    (void)WriteToBin(out_file, data.data(), data.size());
   }
 }
 

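The (void) cast above, like the one on sem_destroy in the next hunk, marks a return value as deliberately discarded, which satisfies static checkers without changing behavior. A minimal illustration with a hypothetical WriteToBinSketch:

#include <cstddef>
#include <cstdio>

int WriteToBinSketch(const char *path, const void *data, std::size_t size) {
  std::FILE *f = std::fopen(path, "wb");
  if (f == nullptr) return -1;
  std::size_t written = std::fwrite(data, 1, size, f);
  (void)std::fclose(f);  // result deliberately discarded on this best-effort path
  return written == size ? 0 : -1;
}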
@@ -121,7 +121,7 @@ void FreeThread(ThreadList *thread_list, Thread *thread) {
   sem_post(&thread->sem);
   while (true) {
     if (thread != NULL && !thread->is_running) {
-      sem_destroy(&thread->sem);
+      (void)sem_destroy(&thread->sem);
       free(thread);
       thread = NULL;
       break;
@@ -403,6 +403,7 @@ int SortCpuProcessor() {
   int err_code = SetArch(freq_set, gCoreNum);
   if (err_code != RET_TP_OK) {
     LOG_ERROR("set arch failed.");
+    return RET_TP_ERROR;
   }
   // sort core id by frequency into descending order
   for (int i = 0; i < gCoreNum; ++i) {
@@ -470,7 +471,7 @@ int SetAffinity(pthread_t thread_id, cpu_set_t *cpuSet) {
 #else
   int ret = pthread_setaffinity_np(thread_id, sizeof(cpu_set_t), cpuSet);
   if (ret != RET_TP_OK) {
-    LOG_ERROR("set thread: %lu to cpu failed", thread_id);
+    LOG_ERROR("set thread: %d to cpu failed", thread_id);
     return RET_TP_SYSTEM_ERROR;
   }
 #endif // __APPLE__
@@ -803,8 +804,12 @@ ThreadPool *CreateThreadPool(int thread_num, int mode) {
   }
 #ifdef BIND_CORE
   if (run_once) {
-    SortCpuProcessor();
+    int ret = SortCpuProcessor();
     run_once = false;
+    if (ret != RET_TP_OK) {
+      LOG_ERROR("SortCpuProcessor failed");
+      return NULL;
+    }
   }
 #endif
   ThreadPool *thread_pool = (struct ThreadPool *)(malloc(sizeof(ThreadPool)));

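The last hunk stops CreateThreadPool from ignoring SortCpuProcessor's status: on failure the pool is not built at all, since thread-to-core binding would otherwise work from a garbage core ordering. A reduced sketch of that fail-fast shape (the RET_TP_* values and names mirror the diff, but the bodies are stand-ins):

#include <cstdio>
#include <cstdlib>

constexpr int RET_TP_OK = 0;
constexpr int RET_TP_ERROR = -1;

int SortCpuProcessorSketch() { return RET_TP_OK; }  // placeholder for the real core probe

void *CreateThreadPoolSketch() {
  int ret = SortCpuProcessorSketch();
  if (ret != RET_TP_OK) {
    std::fprintf(stderr, "SortCpuProcessor failed\n");
    return nullptr;  // fail fast rather than binding threads to unknown cores
  }
  return std::malloc(64);  // stands in for the real pool allocation
}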