!8838 【MSLITE】tools check nullptr sec-icsl master

From: @guohonhzilonghw
Reviewed-by: @zhanghaibo5,@HilbertDavid
Signed-off-by: @zhanghaibo5
pull/8838/MERGE
mindspore-ci-bot committed 4 years ago via Gitee
commit 5998f3cce2
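
The patch applies two guard styles across the converter's optimizer tools: runtime checks that record lite::RET_NULL_PTR through the ReturnCode singleton before bailing out, and debug-time MS_ASSERT guards on pass inputs. A minimal, self-contained sketch of the first style (the ReturnCode class here is a stand-in with an assumed interface; only the names come from the diff):

#include <iostream>

namespace lite {
enum STATUS { RET_OK = 0, RET_NULL_PTR = -2 /* value illustrative */ };

// Stand-in for the converter's ReturnCode singleton; only the two
// member names below are taken from the diff, the rest is assumed.
class ReturnCode {
 public:
  static ReturnCode *GetSingleReturnCode() {
    static ReturnCode instance;
    return &instance;
  }
  // Keep the first non-OK status so later successes cannot mask an error
  // (one plausible policy; the real class may differ).
  void UpdateReturnCode(STATUS code) {
    if (status_ == RET_OK) {
      status_ = code;
    }
  }
  STATUS status() const { return status_; }

 private:
  STATUS status_ = RET_OK;
};
}  // namespace lite

// The guard style used throughout the patch: record RET_NULL_PTR, then bail out.
const int *FirstElement(const int *array) {
  if (array == nullptr) {
    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
    return nullptr;
  }
  return array;
}

int main() {
  FirstElement(nullptr);
  std::cout << "recorded status: " << lite::ReturnCode::GetSingleReturnCode()->status() << std::endl;
  return 0;
}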

@@ -112,6 +112,10 @@ VarNodePtr CreateVarNodeWithSexp(const BaseRef &sexp, const BaseRef &graph) {
 AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars,
                             bool multigraph) {
+  if (primitive_vars == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return nullptr;
+  }
   MS_LOG(DEBUG) << "HandleSexpVector sexp: " + sexp.ToString() + ", graph " + graph.ToString();
   std::vector<AnfNodePtr> input_nodes;
   const auto &tuple = utils::cast<VectorRef>(sexp);
@@ -499,6 +503,10 @@ bool IsQuantNode(const BaseRef &n) {
 }
 bool CheckIsAllInputsParam(const AnfNodePtr &node) {
+  if (node == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return false;
+  }
   if (utils::isa<CNode>(node)) {
     auto cnode = node->cast<CNodePtr>();
     for (size_t i = 1; i < cnode->inputs().size(); i++) {
@@ -537,6 +545,10 @@ size_t GetOutputTensorNum(const AnfNodePtr &node) {
 }
 bool IsMultiOutputTensors(const FuncGraphPtr &graph, const AnfNodePtr &node) {
+  if (node == nullptr || graph == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return false;
+  }
   auto output_node_list = GetRealNodeUsedList(graph, node);
   if (output_node_list->size() != 1) {
     MS_LOG(DEBUG) << "fusion node has multi output nodes";
@@ -548,7 +560,7 @@ bool IsMultiOutputTensors(const FuncGraphPtr &graph, const AnfNodePtr &node) {
 std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
                                                                              const AnfNodePtr &node) {
   auto output_node_list = std::make_shared<std::vector<std::pair<AnfNodePtr, int>>>();
-  if (graph == nullptr) {
+  if (graph == nullptr || node == nullptr) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
     return nullptr;
   }
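
One follow-up worth noting: now that GetRealNodeUsedList can return nullptr, its caller in IsMultiOutputTensors (above) still dereferences the result unchecked. A caller-side guard in the same style as the rest of this patch might look like the fragment below (a reviewer-style suggestion, not part of this commit):

  auto output_node_list = GetRealNodeUsedList(graph, node);
  // Guard the shared_ptr itself before calling size() on it.
  if (output_node_list == nullptr) {
    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
    return false;
  }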

@@ -75,6 +75,9 @@ bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector<PassPtr>
 }
 bool PassManager::Run(const FuncGraphPtr &func_graph) const {
+  if (func_graph == nullptr) {
+    return false;
+  }
   bool changed = false;
   // run all passes
   bool change = true;
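
Unlike the guards above, this one returns false without recording RET_NULL_PTR, so a null graph is indistinguishable from "no pass changed anything". If the same error-recording convention were wanted here, the guard would read (a suggestion, not part of the commit):

bool PassManager::Run(const FuncGraphPtr &func_graph) const {
  if (func_graph == nullptr) {
    // Record the error before bailing out, matching the other guards in this patch.
    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
    return false;
  }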

@@ -76,6 +76,8 @@ std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
 }
 ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(tensor != nullptr);
   auto parameter = func_graph->add_parameter();
   std::vector<int> shape(tensor->shape());
   std::vector<int64_t> shape_vector;
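
From here on, the patch uses MS_ASSERT rather than runtime checks. Assuming the macro follows the usual pattern of expanding to assert() only in debug builds, these guards document the contract but do not protect release binaries. A standalone illustration of that behavior (the macro definition here is a simplified stand-in, not MindSpore's actual header):

#include <cassert>
#include <cstdio>

// Simplified stand-in for MS_ASSERT: expands to assert() in debug builds
// and to a no-op when NDEBUG is defined (assumed to match the real macro's intent).
#ifdef NDEBUG
#define MS_ASSERT(f) ((void)0)
#else
#define MS_ASSERT(f) assert(f)
#endif

int ReadFirst(const int *data) {
  MS_ASSERT(data != nullptr);  // aborts in a debug build, vanishes in release
  return data == nullptr ? -1 : data[0];
}

int main() {
  int v[] = {42};
  std::printf("%d\n", ReadFirst(v));
  return 0;
}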

@@ -38,6 +38,8 @@ const BaseRef ConvActivationFusion::DefinePattern() const {
 const AnfNodePtr ConvActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "conv activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);

@@ -81,6 +81,9 @@ int Get_Kenrnel_nums(const CNodePtr &conv_node) {
   }
 }
 int GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, const CNodePtr &bias_node) {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(conv_node != nullptr);
+  MS_ASSERT(bias_node != nullptr);
   AnfNodePtr conv_bias_node = nullptr;
   AnfNodePtr conv_weight_node = nullptr;
   if (conv_node->inputs().size() == kConvNoBiasLen) {
@@ -158,6 +161,8 @@ const BaseRef ConvBiasaddFusion::DefinePattern() const {
 const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                             const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "Enter pass process";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;

@@ -111,6 +111,8 @@ const BaseRef ConvBatchNormFusion::DefinePattern() const {
 const void ConvBatchNormFusion::InitTransParam(const CNodePtr &bn_node, int kernel_num, float *trans_scale,
                                                float *trans_bias) const {
   MS_ASSERT(bn_node != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   AnfNodePtr bn_mean_node = nullptr;
   AnfNodePtr bn_variance_node = nullptr;
   AnfNodePtr bn_scale_node = nullptr;

@@ -48,6 +48,8 @@ const BaseRef ConvScaleFusion::DefinePattern() const {
 const void ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, float *trans_scale,
                                            float *trans_bias) const {
   MS_ASSERT(scale_node != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   AnfNodePtr scale_weight_node;
   AnfNodePtr scale_bias_node;
   if (scale_node->inputs().size() == kScaleNoBiasLen) {
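
Both InitTransParam overloads fill caller-provided buffers, and the new asserts encode that callers must pass valid pointers. A hypothetical call site (one float per kernel in each buffer is an assumption drawn from the parameter names):

  // Hypothetical call site: the caller owns the two per-kernel buffers.
  std::vector<float> trans_scale(kernel_num, 1.0f);
  std::vector<float> trans_bias(kernel_num, 0.0f);
  InitTransParam(scale_node, kernel_num, trans_scale.data(), trans_bias.data());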

@@ -210,6 +210,7 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
 const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kernel_num, int kernel_size,
                                                    const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   auto tmp_weight_data = new (std::nothrow) float[kernel_num * kernel_size];
   MS_ASSERT(tmp_weight_data != nullptr);
   auto data_size = kernel_num * kernel_size * sizeof(float);
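
Note that MS_ASSERT after new (std::nothrow) only fires in debug builds, while nothrow new can return nullptr in release builds too; CalNewBiasTensor in the next hunk already uses an explicit runtime check. The same style here would be (a suggestion, not part of the commit; error code choice illustrative):

  auto tmp_weight_data = new (std::nothrow) float[kernel_num * kernel_size];
  if (tmp_weight_data == nullptr) {
    MS_LOG(ERROR) << "new tmp_weight_data failed";
    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
    return;
  }
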
@@ -239,6 +240,8 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
 const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_num, bool bias_flag,
                                                  const float *trans_scale, const float *trans_bias) {
   MS_ASSERT(bias_data != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
     if (tmp_bias_data == nullptr) {

@@ -44,6 +44,8 @@ const BaseRef ConvTupleActivationFusion::DefinePattern() const {
 const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                     const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "conv tuple activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;

@@ -137,6 +137,8 @@ CNodePtr LayerNormFusion::CreateLayerNormNode(const FuncGraphPtr &func_graph, co
 const AnfNodePtr LayerNormFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                           const EquivPtr &equiv) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "layer_norm pass";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);

@@ -41,6 +41,8 @@ const BaseRef PoolingActivationFusion::DefinePattern() const {
 const AnfNodePtr PoolingActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                   const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "pooling activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   CheckIfFuncGraphIsNull(func_graph);

@@ -33,6 +33,8 @@ const BaseRef QuantDtypeCastFusion::DefinePattern() const {
 const AnfNodePtr QuantDtypeCastFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "quant dtype cast fusion pass process";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;
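
For reference, the CheckIfFuncGraphIsNull / CheckIfAnfNodeIsNull helpers these passes call are not shown in this diff. Assuming they follow the converter's usual pattern of logging and returning a status code, they look roughly like this sketch (bodies and return type assumed; only the names and lite::RET_OK / lite::RET_NULL_PTR come from the diff):

lite::STATUS CheckIfFuncGraphIsNull(const FuncGraphPtr &graph) {
  if (graph == nullptr) {
    MS_LOG(ERROR) << "The func_graph is null.";
    return lite::RET_NULL_PTR;
  }
  return lite::RET_OK;
}

lite::STATUS CheckIfAnfNodeIsNull(const AnfNodePtr &node) {
  if (node == nullptr) {
    MS_LOG(ERROR) << "The AnfNode is null.";
    return lite::RET_NULL_PTR;
  }
  return lite::RET_OK;
}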
