Memory check and fix: switch bare new to new (std::nothrow), check allocations for nullptr, and clean up already-allocated resources on failure paths

pull/4769/head
kai00 5 years ago
parent 0aeaa7f06f
commit d7efb5c270
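
This commit applies one recurring pattern: allocate with new (std::nothrow) instead of bare new, compare the result against nullptr, log the failure with MS_LOG(ERROR), and return an error code rather than dereferencing a null pointer. A minimal, self-contained sketch of that pattern follows; the struct, function, and error codes are illustrative stand-ins, not the MindSpore Lite API:

#include <cstdio>
#include <new>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = 1;

struct SlidingWindowParamSketch {
  int left_, right_, top_, bottom_;
};

// Allocation-check pattern used across the kernels in this commit.
int ReSizeSketch(SlidingWindowParamSketch **param) {
  // new (std::nothrow) returns nullptr on failure instead of throwing std::bad_alloc.
  *param = new (std::nothrow) SlidingWindowParamSketch;
  if (*param == nullptr) {
    std::fprintf(stderr, "new SlidingWindowParamSketch fail!\n");
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  SlidingWindowParamSketch *param = nullptr;
  if (ReSizeSketch(&param) != RET_OK) {
    return 1;
  }
  delete param;  // non-array new pairs with plain delete
  return 0;
}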

@@ -23,7 +23,11 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_context_Context_creat
     jint device_type,
     jint thread_num,
     jint cpu_bind_mode) {
-  auto *context = new mindspore::lite::Context();
+  auto *context = new (std::nothrow) mindspore::lite::Context();
+  if (context == nullptr) {
+    MS_LOG(ERROR) << "new Context fail!";
+    return (jlong)context;
+  }
   switch (device_type) {
     case 0:
       context->device_ctx_.type = mindspore::lite::DT_CPU;

@@ -172,7 +172,11 @@ int ConvolutionSWFP16CPUKernel::ReSize() {
   memset(nhwc4_input_, 0, nhwc4_input_size);
   // init sliding window param
-  slidingWindow_param_ = new SlidingWindowParam;
+  slidingWindow_param_ = new (std::nothrow) SlidingWindowParam;
+  if (slidingWindow_param_ == nullptr) {
+    MS_LOG(ERROR) << "new SlidingWindowParam fail!";
+    return RET_ERROR;
+  }
   InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM);
   return RET_OK;
 }

@@ -166,7 +166,12 @@ int ConvolutionWinogradFP16CPUKernel::MallocFilterMatrix(int oc_block, int oc_bl
     return RET_ERROR;
   }
   memset(matrix_buffer, 0, trans_matrix_data_size);
-  trans_weight_ = new Matrix();
+  trans_weight_ = new (std::nothrow) Matrix();
+  if (trans_weight_ == nullptr) {
+    MS_LOG(ERROR) << "new Matrix fail!";
+    free(matrix_buffer);
+    return RET_ERROR;
+  }
   trans_weight_->SetData(matrix_buffer);
   trans_weight_->SetNDim(5);

@@ -130,7 +130,11 @@ int DeconvolutionDepthwiseFp16CPUKernel::Init() {
 int DeconvolutionDepthwiseFp16CPUKernel::ReSize() {
   FreeTmpBuffer();
-  sliding_ = new SlidingWindowParam;
+  sliding_ = new (std::nothrow) SlidingWindowParam;
+  if (sliding_ == nullptr) {
+    MS_LOG(ERROR) << "new SlidingWindowParam fail!";
+    return RET_ERROR;
+  }
   InitSlideParam();
   // conv base init
   auto ret = ConvolutionBaseCPUKernel::Init();

@@ -99,7 +99,10 @@ kernel::LiteKernel *CpuArithmeticSelfFp32KernelCreator(const std::vector<lite::t
     return nullptr;
   }
   auto *kernel = new (std::nothrow) ArithmeticSelfCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new ArithmeticSelfCPUKernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@@ -204,14 +204,6 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::tensor::T
   MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
   kernel::LiteKernel *kernel;
   kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  // auto param = reinterpret_cast<ConvParameter *>(opParameter);
-  // if (param->kernel_h_ == 3 && param->kernel_w_ == 3 && param->stride_h_ == 1 && param->stride_w_ == 1 &&
-  //     param->dilation_h_ == 1 && param->dilation_w_ == 1) {
-  //   kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3CPUKernel(opParameter, inputs, outputs, ctx,
-  //   primitive);
-  // } else {
-  //   kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  // }
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "kernel is nullptr.";

@@ -142,7 +142,11 @@ int ConvolutionSWCPUKernel::ReSize() {
   memset(nhwc4_input_, 0, nhwc4_input_size);
   // init sliding window param
-  slidingWindow_param_ = new SlidingWindowParam;
+  slidingWindow_param_ = new (std::nothrow) SlidingWindowParam;
+  if (slidingWindow_param_ == nullptr) {
+    MS_LOG(ERROR) << "new SlidingWindowParam fail!";
+    return RET_ERROR;
+  }
   InitSlidingParamConv(slidingWindow_param_, conv_param_, C4NUM);
   return RET_OK;

@@ -145,7 +145,12 @@ int ConvolutionWinogradCPUKernel::MallocFilterMatrix(int oc_block, int oc_block_
     return RET_ERROR;
   }
   memset(matrix_buffer, 0, trans_matrix_data_size);
-  trans_weight_ = new Matrix();
+  trans_weight_ = new (std::nothrow) Matrix();
+  if (trans_weight_ == nullptr) {
+    MS_LOG(ERROR) << "new Matrix fail!";
+    free(matrix_buffer);
+    return RET_ERROR;
+  }
   trans_weight_->SetData(matrix_buffer);
   trans_weight_->SetNDim(5);

@@ -97,7 +97,10 @@ kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector<lite::t
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_ActivationGrad);
   auto *kernel = new (std::nothrow) ActivationGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new ActivationGradCPUKernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "InferShape kernel failed, name: " << opParameter->name_ << ", type: "

@@ -120,13 +120,27 @@ int ArithmeticGradCPUKernel::InferShape() {
       arithmeticParameter_->out_shape_[i] = outShape[i];
     }
   }
   tile_data0 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
-  MS_ASSERT(tile_data0 != nullptr);
+  if (tile_data0 == nullptr) {
+    MS_LOG(ERROR) << "new data0 fail!";
+    return RET_ERROR;
+  }
   tile_data1 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
-  MS_ASSERT(tile_data1 != nullptr);
+  if (tile_data1 == nullptr) {
+    MS_LOG(ERROR) << "new data1 fail!";
+    delete[] tile_data0;
+    return RET_ERROR;
+  }
   if (type() == PrimitiveType_DivGrad) {
     tile_data2 = new (std::nothrow) float[inputs_.at(0)->ElementsNum()];
-    MS_ASSERT(tile_data2 != nullptr);
+    if (tile_data2 == nullptr) {
+      MS_LOG(ERROR) << "new data2 fail!";
+      delete[] tile_data0;
+      delete[] tile_data1;
+      return RET_ERROR;
+    }
   }
 }
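
The InferShape hunk above allocates the tile buffers with array new, so each failure path has to unwind every buffer allocated so far, and with the array form delete[]. A standalone sketch of that staged-allocation cleanup; the function and buffer names are illustrative, not the kernel's actual interface:

#include <cstdio>
#include <new>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = 1;

// On any failure, free the buffers that were already allocated, then bail out.
int AllocTileBuffers(int count, float **d0, float **d1, float **d2) {
  *d0 = new (std::nothrow) float[count];
  if (*d0 == nullptr) {
    std::fprintf(stderr, "new data0 fail!\n");
    return RET_ERROR;
  }
  *d1 = new (std::nothrow) float[count];
  if (*d1 == nullptr) {
    std::fprintf(stderr, "new data1 fail!\n");
    delete[] *d0;  // array new pairs with delete[], not plain delete
    *d0 = nullptr;
    return RET_ERROR;
  }
  *d2 = new (std::nothrow) float[count];
  if (*d2 == nullptr) {
    std::fprintf(stderr, "new data2 fail!\n");
    delete[] *d0;
    delete[] *d1;
    *d0 = *d1 = nullptr;
    return RET_ERROR;
  }
  return RET_OK;
}

Holding each buffer in a std::unique_ptr<float[]> would make this unwinding automatic, at the cost of changing the member types.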

@@ -99,7 +99,10 @@ kernel::LiteKernel *CpuBNGradInputFp32KernelCreator(const std::vector<lite::tens
   // parameter->name = opDef.name()->str().data();
   // parameter->type = opDef.attr_type();
   auto *kernel = new (std::nothrow) BNGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new BNGradInputCPUKernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (RET_OK != ret) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@@ -141,7 +141,10 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::t
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradFilter);
   auto *kernel = new (std::nothrow) ConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new kernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (RET_OK != ret) {

@@ -63,7 +63,11 @@ int ConvolutionGradInputCPUKernel::Init() {
   int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ *
                 conv_param->input_channel_ / conv_param->group_;
-  workspace = new float[ws_size];
+  workspace = new (std::nothrow) float[ws_size];
+  if (workspace == nullptr) {
+    MS_LOG(ERROR) << "new workspace fail!";
+    return RET_ERROR;
+  }
   return 0;
 }
@@ -121,7 +125,10 @@ kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::te
   MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradInput);
   auto *kernel = new (std::nothrow) ConvolutionGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new kernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (0 != ret) {

@@ -122,8 +122,13 @@ kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector<lite::t
     MS_LOG(ERROR) << "Creator failed, opParameter is nullptr!";
     return nullptr;
   }
   auto *kernel = new (std::nothrow) ArithmeticSelfInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  MS_ASSERT(kernel != nullptr);
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "new ArithmeticSelfInt8CPUKernel fail!";
+    return nullptr;
+  }
   auto ret = kernel->Init();
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@@ -151,7 +151,12 @@ int DeconvolutionDepthwiseInt8CPUKernel::Init() {
 int DeconvolutionDepthwiseInt8CPUKernel::ReSize() {
   FreeTmpBuffer();
-  sliding = new SlidingWindowParam;
+  sliding = new (std::nothrow) SlidingWindowParam;
+  if (sliding == nullptr) {
+    MS_LOG(ERROR) << "new SlidingWindowParam fail!";
+    return RET_ERROR;
+  }
   InitSlideParam();
   // conv base init

@@ -108,7 +108,11 @@ void DeConvInt8CPUKernel::CheckSupportOptimize() {
 }
 int DeConvInt8CPUKernel::InitParam() {
-  matmul_param_ = new MatMulParameter();
+  matmul_param_ = new (std::nothrow) MatMulParameter();
+  if (matmul_param_ == nullptr) {
+    MS_LOG(ERROR) << "new MatMulParameter fail!";
+    return RET_ERROR;
+  }
   matmul_param_->row_ = conv_param_->input_h_ * conv_param_->input_w_;
   matmul_param_->deep_ = conv_param_->input_channel_;
   matmul_param_->col_ = conv_param_->output_channel_ * conv_param_->kernel_h_ * conv_param_->kernel_w_;
@@ -121,6 +125,7 @@ int DeConvInt8CPUKernel::InitParam() {
   } else {
     /* todo */
   }
+  return RET_OK;
 }

@@ -319,6 +319,4 @@ TEST_F(CropTestFp32, CropTest11) {
   std::cout << "\n";
   CompareOutputData(output, expect_out, kOutSize, 0.000001);
 }
 }  // namespace mindspore

@@ -90,6 +90,7 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
   if (kernel == nullptr) {
+    delete param;
     MS_LOG(ERROR) << "Kernel:" << test_name << " create fail.";
-    delete param;
     return nullptr;
   }
   auto ret = kernel->Init();
@@ -97,6 +98,8 @@ kernel::ActivationOpenClKernel *create_kernel(lite::opencl::OpenCLAllocator *all
+    delete param;
+    delete kernel;
     MS_LOG(ERROR) << "Init " << test_name << " fail.";
-    delete kernel;
-    delete param;
     return nullptr;
   }
   MS_LOG(INFO) << "Initialize input data";

@@ -141,7 +141,7 @@ void TestCase(const std::vector<int> &shape_a, const std::vector<int> &shape_b) {
   delete kernel;
   delete arith_kernel;
   delete param;
   for (auto tensor : inputs) {
     delete tensor;
   }

@@ -123,6 +123,7 @@ TEST_F(TestAvgPoolingOpenCL, AvgPoolFp32) {
   delete tensor_out;
   delete pooling_kernel;
   delete pGraph;
+  delete param;
   lite::opencl::OpenCLRuntime::DeleteInstance();
 }

@@ -203,6 +203,7 @@ TEST_F(ConvScaleFusionTest, TestConvScaleNode) {
   for (auto &cnode : new_meta_graph->nodes) {
     ASSERT_EQ(cnode->primitive->value.AsConv2D()->hasBias, true);
   }
+  delete anf_transform;
 }
 TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) {
@@ -217,5 +218,6 @@ TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) {
     ASSERT_EQ(cnode->primitive->value.AsDepthwiseConv2D()->hasBias, true);
     ASSERT_EQ(cnode->inputIndex.size(), 3);
   }
+  delete anf_transform;
 }
 }  // namespace mindspore
