check if new is nullptr

pull/5620/head
lyvette 5 years ago
parent 24f00cc6dc
commit c5bb6eaac8

@ -19,7 +19,9 @@
#include "utils/log_adapter.h"
namespace mindspore::lite {
std::shared_ptr<Allocator> Allocator::Create() { return std::shared_ptr<Allocator>(new DefaultAllocator()); }
std::shared_ptr<Allocator> Allocator::Create() {
return std::shared_ptr<Allocator>(new (std::nothrow) DefaultAllocator());
}
// Intentionally empty: DefaultAllocator needs no construction-time setup here.
DefaultAllocator::DefaultAllocator() {}

@ -19,7 +19,11 @@
namespace mindspore::kernel {
Matrix *TransformMatrixGenerator(int m, int k) {
auto matrix = new Matrix;
auto matrix = new (std::nothrow) Matrix;
if (matrix == nullptr) {
MS_LOG(ERROR) << "matrix is nullptr.";
return nullptr;
}
auto data = malloc(m * k * sizeof(float));
if (data == nullptr) {
MS_LOG(ERROR) << "Malloc matrix data failed.";

@ -66,7 +66,7 @@ kernel::LiteKernel *CpuSoftmaxInt8KernelCreator(const std::vector<lite::tensor::
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax);
SoftmaxInt8CPUKernel *kernel = new (std::nothrow) SoftmaxInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
auto *kernel = new (std::nothrow) SoftmaxInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!";
return nullptr;
@ -91,7 +91,7 @@ kernel::LiteKernel *CpuSoftmaxFp32KernelCreator(const std::vector<lite::tensor::
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax);
SoftmaxCPUKernel *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive);
auto *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!";
return nullptr;

@ -36,7 +36,10 @@ class DeConvolutionFp16CPUKernel : public ConvolutionBaseFP16CPUKernel {
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {
matmul_param_ = new MatMulParameter();
matmul_param_ = new (std::nothrow) MatMulParameter();
if (matmul_param_ == nullptr) {
MS_LOG(ERROR) << "new MatMulParameter fail!";
}
}
~DeConvolutionFp16CPUKernel() override;
int Init() override;

@ -37,7 +37,10 @@ class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel {
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
matmul_param_ = new MatMulParameter();
matmul_param_ = new (std::nothrow) MatMulParameter();
if (matmul_param_ == nullptr) {
MS_LOG(ERROR) << "new MatMulParameter fail!";
}
}
~Convolution1x1CPUKernel();
int Init() override;

@ -282,7 +282,11 @@ kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector<lite::t
return nullptr;
}
auto *kernel = new (std::nothrow) ArithmeticGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticGradCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@ -103,7 +103,10 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::tensor:
MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad);
auto *kernel =
new (std::nothrow) BiasGradCPUKernel(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BiasGradCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (RET_OK != ret) {

@ -33,7 +33,11 @@ namespace mindspore::kernel {
int BNGradInputCPUKernel::Init() {
auto bn_param = reinterpret_cast<bnParameter *>(opParameter);
workspace_size = 5 * bn_param->channels;
workspace = new float[workspace_size];
workspace = new (std::nothrow) float[workspace_size];
if (workspace == nullptr) {
MS_LOG(ERROR) << "new workspace fail!";
return RET_ERROR;
}
if (2 != this->inputs_.size()) {
MS_LOG(ERROR) << "Conv2d Grad should has 2 inputs";

@ -61,7 +61,11 @@ int ConvolutionGradFilterCPUKernel::Init() {
int ws_size = conv_param->output_h_ * conv_param->output_w_ * conv_param->kernel_h_ * conv_param->kernel_w_ *
conv_param->input_channel_ / conv_param->group_;
workspace = new float[ws_size];
workspace = new (std::nothrow) float[ws_size];
if (workspace == nullptr) {
MS_LOG(ERROR) << "new workspace fail!";
return RET_ERROR;
}
int output_w = 0;
int output_h = 0;

@ -68,7 +68,10 @@ kernel::LiteKernel *CpuOptMomentumFp32KernelCreator(const std::vector<lite::tens
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_OptMomentum);
auto *kernel = new (std::nothrow) OptMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new OptMomentumCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (0 != ret) {

@ -181,7 +181,11 @@ kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::tens
MS_ASSERT(desc.type == schema::PrimitiveType_PoolingGrad);
auto *kernel = new (std::nothrow) PoolingGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
MS_ASSERT(kernel != nullptr);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingGradCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (RET_OK != ret) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@ -55,6 +55,11 @@ kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector<lite::tensor
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad);
auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PowerGradCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "

@ -79,7 +79,11 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() {
}
size_t data_size = inputs_.at(0)->ElementsNum();
float *losses = new (std::nothrow) float[data_size];
MS_ASSERT(losses != nullptr);
if (losses == nullptr) {
MS_LOG(ERROR) << "losses is null";
return nullptr;
}
std::fill(losses, losses + data_size, 0);
MS_ASSERT(out != nullptr);

@ -29,6 +29,11 @@ namespace mindspore::kernel {
// Releases every heap resource this kernel owns: temporary work buffers,
// the quantization parameters held by the convolution base class, and the
// matmul parameter object.
DeConvInt8CPUKernel::~DeConvInt8CPUKernel() {
  FreeTmpBuffer();
  ConvolutionBaseCPUKernel::FreeQuantParam();
  // delete on a null pointer is a well-defined no-op, so no guard is needed.
  delete matmul_param_;
  matmul_param_ = nullptr;
}
void DeConvInt8CPUKernel::FreeTmpBuffer() {

Loading…
Cancel
Save