diff --git a/mindspore/lite/src/ops/argmax.cc b/mindspore/lite/src/ops/argmax.cc
index e44ed1615d..dd2550f813 100644
--- a/mindspore/lite/src/ops/argmax.cc
+++ b/mindspore/lite/src/ops/argmax.cc
@@ -62,6 +62,7 @@ int ArgMax::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   MS_ASSERT(output != nullptr);
   if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) {
     MS_LOG(ERROR) << "tensor number is error.";
+    return RET_ERROR;
   }
   output->SetFormat(input->GetFormat());
diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc
index 8f3da2bceb..cbd868a534 100644
--- a/mindspore/lite/src/ops/deconv2d.cc
+++ b/mindspore/lite/src/ops/deconv2d.cc
@@ -217,6 +217,7 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   std::vector<int> out_shape = {output_n, output_h, output_w, output_c};
   output->set_shape(out_shape);
@@ -230,6 +231,7 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
@@ ... @@ int ArgMinMaxBaseCPUKernel::Run() {
   auto input_data = in_tensors_.at(0)->MutableData();
   auto output_data = out_tensors_.at(0)->MutableData();
-  auto in_tensor = in_tensors_.at(0)->shape();
-  auto shape = reinterpret_cast<int *>(malloc(in_tensor.size() * sizeof(int)));
-  if (shape == nullptr) {
-    MS_LOG(ERROR) << "malloc shape failed.";
-    return RET_ERROR;
-  }
-  memcpy(shape, in_tensor.data(), in_tensor.size() * sizeof(int));
+  auto shape = in_tensors_.at(0)->shape();
   auto param = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_);
   MS_ASSERT(context_->allocator != nullptr);
@@ -89,7 +83,7 @@ int ArgMinMaxBaseCPUKernel::Run() {
       return RET_ERROR;
     }
   }
-  ArgMinMax(input_data, output_data, reinterpret_cast<const int *>(shape), param);
+  ArgMinMax(input_data, output_data, reinterpret_cast<const int *>(shape.data()), param);
   context_->allocator->Free(param->arg_elements_);
   param->arg_elements_ = nullptr;
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
index 5e9b629659..eb5b338a3b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc
@@ -59,25 +59,25 @@ int ArgMinMaxInt8CPUKernel::Run() {
   const int8_t *input_data = reinterpret_cast<const int8_t *>(in_tensors_.at(0)->MutableData());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
-  auto in_shape = input->shape().data();
+  auto in_shape = input->shape();
   auto param = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_);
   if (param->topk_ == 1) {
-    Int8ArgMinMaxQuant(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
+    Int8ArgMinMaxQuant(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
     return RET_OK;
   }
   switch (param->axis_) {
     case 0:
-      Int8ArgMinMaxDim0(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
+      Int8ArgMinMaxDim0(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
      break;
    case 1:
-      Int8ArgMinMaxDim1(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
+      Int8ArgMinMaxDim1(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
      break;
    case 2:
-      Int8ArgMinMaxDim2(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
+      Int8ArgMinMaxDim2(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
      break;
    case 3:
-      Int8ArgMinMaxDim3(input_data, output_data, in_shape, param, &in_quant_arg_, &out_quant_arg_);
+      Int8ArgMinMaxDim3(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
      break;
   }
   return RET_OK;
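The ArgMinMax changes above are all the same fix: the `shape()` call used here returns the dims as a `std::vector<int>` by value (which is what the removed `malloc`/`memcpy` workaround was compensating for), so taking `.data()` on the temporary dangles, and copying it into a raw buffer forces a manual `free`. Keeping the returned vector in a local and calling `.data()` only at the call site avoids both. A minimal standalone sketch of the pitfall, using a hypothetical `FakeTensor` rather than the real `lite::Tensor` API:

```cpp
// Minimal sketch (not MindSpore code): a hypothetical Tensor whose shape()
// returns std::vector<int> by value, as the surrounding changes assume.
#include <cstdio>
#include <vector>

struct FakeTensor {
  std::vector<int> dims;
  std::vector<int> shape() const { return dims; }  // returned by value
};

void PrintDims(const int *shape, int dim_count) {
  for (int i = 0; i < dim_count; ++i) {
    printf("%d ", shape[i]);
  }
  printf("\n");
}

int main() {
  FakeTensor t{{1, 32, 32, 3}};

  // Buggy pattern removed by the diff: .data() is taken from a temporary
  // vector that is destroyed at the end of the full expression, so the
  // pointer dangles before it is ever used.
  // const int *in_shape = t.shape().data();   // dangling

  // Fixed pattern: keep the vector alive in a local, take .data() later.
  auto shape = t.shape();
  PrintDims(shape.data(), static_cast<int>(shape.size()));
  return 0;
}
```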
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
index 0f351fbb7b..25f66da284 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.cc
@@ -22,6 +22,7 @@
 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_MEMORY_FAILED;
 using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
@@ -43,18 +44,58 @@ int CropInt8CPUKernel::Init() {
   crop_para_->quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
   crop_para_->quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
+  crop_para_->in_shape_ = reinterpret_cast<int *>(malloc(input_tensor->shape().size() * sizeof(int)));
+  if (crop_para_->in_shape_ == nullptr) {
+    MS_LOG(ERROR) << "malloc memory failed";
+    return RET_MEMORY_FAILED;
+  }
+  crop_para_->out_shape_ = reinterpret_cast<int *>(malloc(out_tensor->shape().size() * sizeof(int)));
+  if (crop_para_->out_shape_ == nullptr) {
+    MS_LOG(ERROR) << "malloc memory failed";
+    return RET_MEMORY_FAILED;
+  }
   if (!InferShapeDone()) {
     return RET_OK;
   }
   return ReSize();
 }
 
+CropInt8CPUKernel::~CropInt8CPUKernel() {
+  if (crop_para_->in_shape_ != nullptr) {
+    free(const_cast<int *>(crop_para_->in_shape_));
+    crop_para_->in_shape_ = nullptr;
+  }
+
+  if (crop_para_->out_shape_ != nullptr) {
+    free(const_cast<int *>(crop_para_->out_shape_));
+    crop_para_->out_shape_ = nullptr;
+  }
+}
+
 int CropInt8CPUKernel::ReSize() {
   auto *input_tensor = in_tensors_.at(kInputIndex);
-  crop_para_->in_shape_ = input_tensor->shape().data();
+  auto input_shape = input_tensor->shape();
+  size_t input_dim = input_shape.size();
+
+  if (crop_para_->in_shape_ == nullptr) {
+    MS_LOG(ERROR) << "in_shape_ is nullptr";
+    return RET_ERROR;
+  } else {
+    memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->in_shape_)), input_shape.data(),
+           sizeof(int) * input_dim);
+  }
+
   auto *out_tensor = out_tensors_.at(kOutputIndex);
-  crop_para_->out_shape_ = out_tensor->shape().data();
-  auto input_dim = input_tensor->shape().size();
+  auto output_shape = out_tensor->shape();
+  size_t output_dim = output_shape.size();
+
+  if (crop_para_->out_shape_ == nullptr) {
+    MS_LOG(ERROR) << "out_shape_ is nullptr";
+    return RET_ERROR;
+  } else {
+    memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->out_shape_)), output_shape.data(),
+           sizeof(int) * output_dim);
+  }
   MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE);
   crop_para_->input_dim_ = input_dim;
   PadOffset(input_dim, crop_para_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h
index 5e10639efa..faef49b562 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h
@@ -35,7 +35,7 @@ class CropInt8CPUKernel : public CropBaseCPUKernel {
     crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_);
     crop_para_->thread_count_ = op_parameter_->thread_num_;
   }
-  ~CropInt8CPUKernel() = default;
+  ~CropInt8CPUKernel();
 
   int Init() override;
   int ReSize() override;
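With these crop changes, `CropParameter` no longer points at a temporary vector's storage: `Init()` mallocs `in_shape_`/`out_shape_`, `ReSize()` copies the current shapes into them, and the new destructor (declared in the header instead of `= default`) frees them. The `const_cast` is presumably only needed because the compute code sees the shapes through `const int *` fields; ownership now lives with the kernel. The sketch below mirrors that lifecycle with hypothetical types (`ShapeParam`/`ShapeOwner` are not MindSpore classes); like the diff, it assumes the tensor rank does not grow between `Init()` and `ReSize()`, since the buffer is sized only once:

```cpp
// Minimal lifecycle sketch (hypothetical names, not the MindSpore classes):
// the parameter struct keeps a `const int *` view of the shape, so the owner
// holds a malloc'd copy instead of pointing at a temporary vector's data().
#include <cstdlib>
#include <cstring>
#include <vector>

struct ShapeParam {
  const int *in_shape_ = nullptr;  // viewed as const by the C compute code
};

class ShapeOwner {
 public:
  // Mirrors Init(): allocate a buffer sized to the current shape.
  bool Init(const std::vector<int> &shape) {
    param_.in_shape_ = reinterpret_cast<int *>(malloc(shape.size() * sizeof(int)));
    return param_.in_shape_ != nullptr;
  }

  // Mirrors ReSize(): refresh the copy from the (possibly re-inferred) shape.
  // Assumes shape.size() has not grown since Init(), as the diff does.
  bool ReSize(const std::vector<int> &shape) {
    if (param_.in_shape_ == nullptr) {
      return false;
    }
    memcpy(const_cast<int *>(param_.in_shape_), shape.data(), shape.size() * sizeof(int));
    return true;
  }

  // Mirrors the new destructor: release the owned copy exactly once.
  ~ShapeOwner() {
    if (param_.in_shape_ != nullptr) {
      free(const_cast<int *>(param_.in_shape_));
      param_.in_shape_ = nullptr;
    }
  }

 private:
  ShapeParam param_;
};

int main() {
  ShapeOwner owner;
  std::vector<int> shape = {1, 16, 16, 8};
  if (owner.Init(shape)) {
    owner.ReSize(shape);
  }
  return 0;
}
```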
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
index 5607aa6f50..004f407f8a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
@@ -64,6 +64,17 @@ int LeakyReluInt8CPUKernel::Init() {
   quant_prelu_parm_.quant_arg.output_activation_max_ = std::numeric_limits<int8_t>::max();
   quant_prelu_parm_.quant_arg.output_activation_min_ = std::numeric_limits<int8_t>::min();
+
+  quant_prelu_parm_.in_shape_ = reinterpret_cast<int *>(malloc(input_tensor->shape().size() * sizeof(int)));
+  if (quant_prelu_parm_.in_shape_ == nullptr) {
+    MS_LOG(ERROR) << "malloc memory failed";
+    return RET_MEMORY_FAILED;
+  }
+  quant_prelu_parm_.out_shape_ = reinterpret_cast<int *>(malloc(out_tensor->shape().size() * sizeof(int)));
+  if (quant_prelu_parm_.out_shape_ == nullptr) {
+    MS_LOG(ERROR) << "malloc memory failed";
+    return RET_MEMORY_FAILED;
+  }
   if (!InferShapeDone()) {
     return RET_OK;
   }
@@ -79,6 +90,14 @@ LeakyReluInt8CPUKernel::~LeakyReluInt8CPUKernel() {
     free(input_quant_);
     input_quant_ = nullptr;
   }
+  if (quant_prelu_parm_.in_shape_ != nullptr) {
+    free(const_cast<int *>(quant_prelu_parm_.in_shape_));
+    quant_prelu_parm_.in_shape_ = nullptr;
+  }
+  if (quant_prelu_parm_.out_shape_ != nullptr) {
+    free(const_cast<int *>(quant_prelu_parm_.out_shape_));
+    quant_prelu_parm_.out_shape_ = nullptr;
+  }
 }
 
 int LeakyReluInt8CPUKernel::ReSize() {
@@ -92,10 +111,26 @@ int LeakyReluInt8CPUKernel::ReSize() {
   }
   quant_prelu_parm_.input_dim_ = input_dim;
   quant_prelu_parm_.element_num = in_tensors_[0]->Size();
-  quant_prelu_parm_.in_shape_ = input_tensor->shape().data();
-  quant_prelu_parm_.out_shape_ = out_tensor->shape().data();
+  auto input_shape = input_tensor->shape();
+  if (quant_prelu_parm_.in_shape_ == nullptr) {
+    MS_LOG(ERROR) << "in_shape_ is nullptr";
+    return RET_ERROR;
+  } else {
+    memcpy(reinterpret_cast<void *>(const_cast<int *>(quant_prelu_parm_.in_shape_)), input_shape.data(),
+           sizeof(int) * input_dim);
+  }
+  auto output_shape = out_tensor->shape();
+  size_t output_dim = output_shape.size();
+  if (quant_prelu_parm_.out_shape_ == nullptr) {
+    MS_LOG(ERROR) << "out_shape_ is nullptr";
+    return RET_ERROR;
+  } else {
+    memcpy(reinterpret_cast<void *>(const_cast<int *>(quant_prelu_parm_.out_shape_)), output_shape.data(),
+           sizeof(int) * output_dim);
+  }
   input_quant_ = static_cast<QuantArg *>(malloc(sizeof(QuantArg) * input_dim));
   if (input_quant_ == nullptr) {
+    MS_LOG(ERROR) << "malloc memory failed";
     return RET_MEMORY_FAILED;
   }
   return RET_OK;
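The leaky-relu kernel gets the same ownership treatment as crop, and the patch also tightens error paths elsewhere: `ArgMax::InferShape` now returns after logging a bad tensor count, and the `malloc` failures log before returning `RET_MEMORY_FAILED`. A small sketch of that log-and-return style, with hypothetical codes and a stand-in logger rather than the real `RET_*`/`MS_LOG` definitions:

```cpp
// Minimal sketch of the error-propagation style the diff enforces (hypothetical
// codes and logger, not the MindSpore definitions): every failed check both
// logs and returns, so execution never falls through on an invalid state.
#include <cstdio>
#include <cstdlib>
#include <vector>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;
constexpr int RET_MEMORY_FAILED = -2;

#define LOG_ERROR(msg) fprintf(stderr, "ERROR: %s\n", msg)

int Prepare(const std::vector<const void *> &inputs, int **shape_buf, size_t rank) {
  if (inputs.size() != 1) {
    LOG_ERROR("tensor number is error.");
    return RET_ERROR;  // previously the check only logged and kept going
  }
  *shape_buf = static_cast<int *>(malloc(rank * sizeof(int)));
  if (*shape_buf == nullptr) {
    LOG_ERROR("malloc memory failed");
    return RET_MEMORY_FAILED;  // distinct code for allocation failures
  }
  return RET_OK;
}

int main() {
  int *buf = nullptr;
  int ret = Prepare({nullptr}, &buf, 4);
  if (ret == RET_OK) {
    free(buf);
  }
  return ret == RET_OK ? 0 : 1;
}
```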