diff --git a/mindspore/lite/src/kernel_registry.h b/mindspore/lite/src/kernel_registry.h
index 552f6bec46..d0fc1911f5 100644
--- a/mindspore/lite/src/kernel_registry.h
+++ b/mindspore/lite/src/kernel_registry.h
@@ -36,13 +36,11 @@ class KernelRegistry {
   static KernelRegistry *GetInstance();
   int Init();
-  void FreeCreatorArray();
   virtual kernel::KernelCreator GetCreator(const kernel::KernelKey &desc);
   const kernel::KernelCreator *GetCreatorArrays();
-  int GetCreatorFuncIndex(const kernel::KernelKey desc);
-  void RegKernel(const kernel::KernelKey desc, kernel::KernelCreator creator);
-  void RegKernel(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType type,
-                 kernel::KernelCreator creator);
+  int GetCreatorFuncIndex(kernel::KernelKey desc);
+  void RegKernel(kernel::KernelKey desc, kernel::KernelCreator creator);
+  void RegKernel(kernel::KERNEL_ARCH arch, TypeId data_type, schema::PrimitiveType type, kernel::KernelCreator creator);
   bool Merge(const std::unordered_map &newCreators);
   kernel::LiteKernel *GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                                 const PrimitiveC *primitive, const InnerContext *ctx, const kernel::KernelKey &key);
@@ -61,6 +59,7 @@ class KernelRegistrar {
   KernelRegistrar(const kernel::KernelKey &desc, kernel::KernelCreator creator) {
     KernelRegistry::GetInstance()->RegKernel(desc, creator);
   }
+  ~KernelRegistrar() = default;

   KernelRegistrar(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType op_type,
                   kernel::KernelCreator creator) {
diff --git a/mindspore/lite/src/model_common.cc b/mindspore/lite/src/model_common.cc
index 87428952cc..eae287a027 100644
--- a/mindspore/lite/src/model_common.cc
+++ b/mindspore/lite/src/model_common.cc
@@ -23,6 +23,7 @@ int ConvertSubGraph(const schema::SubGraph &sub_graph, Model *model) {
     MS_LOG(ERROR) << "new subGraph fail!";
     return RET_ERROR;
   }
+  MS_ASSERT(sub_graph.name() != nullptr);
   subgraph->name_ = sub_graph.name()->c_str();
   MS_ASSERT(sub_graph.inputIndices() != nullptr);
   auto in_count = sub_graph.inputIndices()->size();
@@ -68,6 +69,7 @@ const void *GetMetaGraphByVerison(const char *buf, const int &schema_version) {
 }

 int GenerateModelByVersion(const void *meta_graph, Model *model, const int &schema_version) {
+  MS_ASSERT(meta_graph != nullptr);
   MS_ASSERT(model != nullptr);
   int status = RET_ERROR;
   if (schema_version == SCHEMA_VERSION::SCHEMA_CUR) {
diff --git a/mindspore/lite/src/param_value_lite.h b/mindspore/lite/src/param_value_lite.h
index 338a046908..e3f95a93df 100644
--- a/mindspore/lite/src/param_value_lite.h
+++ b/mindspore/lite/src/param_value_lite.h
@@ -27,7 +27,7 @@ namespace mindspore {
 class ParamValueLite : public Value {
  public:
   ParamValueLite() : tensor_addr_(nullptr), tensor_size_(0) {}
-  virtual ~ParamValueLite() {
+  ~ParamValueLite() override {
     if (tensor_addr_ != nullptr) {
       auto tensor_mem = reinterpret_cast<char *>(tensor_addr_);
       delete[](tensor_mem);
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/dequant.h b/mindspore/lite/src/runtime/kernel/arm/base/dequant.h
index 1363fb0515..7953b1ce06 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/dequant.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/dequant.h
@@ -58,7 +58,7 @@ class DequantUtil {
       }
     }
   } else if (input_tensor->GetQuantParams().size() != kPerTensor) {
-    size_t channels = static_cast<size_t>(input_tensor->Batch());
+    auto channels = static_cast<size_t>(input_tensor->Batch());
     if (input_tensor->GetQuantParams().size() != channels) {
       MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
       free(dequant_datas);
@@ -136,6 +136,10 @@ class DequantUtil {
   template <typename T1, typename T2>
   static void UnPackUtil(const schema::Tensor *input_tensor, int origin_bit, void *unpack_int_data) {
+    if (input_tensor == nullptr || input_tensor->data() == nullptr) {
+      MS_LOG(ERROR) << "tensor data is null";
+      return;
+    }
     auto weight_data = input_tensor->data()->data();
     int pack_size =
       input_tensor->dataType() == kNumberTypeInt8 ? input_tensor->data()->size() : input_tensor->data()->size() / 2;
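A note on the UnPackUtil guard above: flatbuffer-generated accessors such as schema::Tensor::data() return nullptr when the field was never written, so both the tensor pointer and its payload have to be checked before the first dereference. A minimal sketch of the same pattern, with illustrative stand-in types (PackedTensor, Blob, and UnpackSketch are not from this patch):

    #include <cstdio>

    // Illustrative stand-ins for flatbuffer-generated accessors, which
    // return nullptr when an optional field is absent.
    struct Blob {
      const unsigned char *payload;
      const unsigned char *data() const { return payload; }
    };
    struct PackedTensor {
      const Blob *blob;
      const Blob *data() const { return blob; }
    };

    // Same shape as the guard added to UnPackUtil: validate the accessor
    // chain before the first dereference instead of assuming it exists.
    void UnpackSketch(const PackedTensor *tensor) {
      if (tensor == nullptr || tensor->data() == nullptr) {
        std::fprintf(stderr, "tensor data is null\n");
        return;
      }
      const unsigned char *weight_data = tensor->data()->data();  // now safe
      (void)weight_data;
    }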
diff --git a/mindspore/lite/src/runtime/thread_pool.c b/mindspore/lite/src/runtime/thread_pool.c
index 9e7ed80c0e..90c326d2ac 100644
--- a/mindspore/lite/src/runtime/thread_pool.c
+++ b/mindspore/lite/src/runtime/thread_pool.c
@@ -848,6 +848,7 @@ ThreadPool *CreateThreadPool(int thread_num, int mode) {
   if (thread_pool->thread_list == NULL) {
     LOG_ERROR("create thread list failed");
     DestroyThreadPool(thread_pool);
+    thread_pool = NULL;
     return NULL;
   }
   thread_pool->thread_list->head = NULL;
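Nulling the freed pointer in thread_pool.c costs nothing and keeps any later statement in the error path from reaching freed memory if more cleanup code is ever added before the return. A sketch of the idiom under assumed types (ThreadPool's layout, CreateThreadPoolSketch, and list_alloc_failed are illustrative):

    #include <cstdlib>

    struct ThreadPool { int thread_num; };

    void DestroyThreadPool(ThreadPool *pool) { std::free(pool); }

    // On a failed initialization step, destroy the half-built pool and null
    // the local pointer so nothing below can touch freed memory.
    ThreadPool *CreateThreadPoolSketch(int thread_num) {
      auto *pool = static_cast<ThreadPool *>(std::malloc(sizeof(ThreadPool)));
      if (pool == nullptr) {
        return nullptr;
      }
      pool->thread_num = thread_num;
      const bool list_alloc_failed = true;  // stand-in for the real thread-list check
      if (list_alloc_failed) {
        DestroyThreadPool(pool);
        pool = nullptr;  // mirrors `thread_pool = NULL;` in the hunk above
        return nullptr;
      }
      return pool;
    }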
diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
index 6ed145ac60..1104c66913 100644
--- a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc
@@ -15,11 +15,7 @@
  */

 #include "tools/anf_importer/import_from_protobuf.h"
-
-#include
 #include
-
-#include
 #include
 #include
 #include
@@ -243,7 +239,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   node->set_abstract(abstract_tensor);

   if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) {
-    Tensor *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
+    auto *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape);
     if (tensor_info == nullptr) {
       return RET_MEMORY_FAILED;
     }
@@ -435,7 +431,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con
   }
   if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) {
     if (kv.size() == 1) {
-      std::unordered_map<std::string, ValuePtr>::iterator iter = kv.begin();
+      auto iter = kv.begin();
       prim->AddAttr(attr_name, iter->second);
     } else {
       auto res = ParserScalarAttrValue(ref_attr_name, kv);
@@ -459,7 +455,7 @@ bool AnfImporterFromProtobuf::ObtainValueNodeInTensorForm(const std::string &val
   param_value->set_tensor_shape(shape_vector);
   param_value->set_tensor_type(kDefaultValueSwitchMap[attr_tensor_type]);
   const std::string &tensor_buf = attr_tensor.raw_data();
-  auto tensor_data = new (std::nothrow) char[tensor_buf.size()];
+  auto tensor_data = new (std::nothrow) char[tensor_buf.size() + 1];
   if (tensor_data == nullptr) {
     MS_LOG(ERROR) << "Tensor_data is nullptr";
     return false;
   }
@@ -648,14 +644,14 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out
     MS_LOG(ERROR) << "funcgraph new cnode failed";
     return nullptr;
   }
-  if (0 == kv.size()) {
+  if (kv.empty()) {
     AbstractBasePtrList elem;
     for (size_t index = 1; index < cnode_ptr->inputs().size(); ++index) {
       elem.push_back(cnode_ptr->input(index)->abstract());
     }
     cnode_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
   } else if (1 == kv.size()) {
-    std::unordered_map<std::string, abstract::AbstractBasePtr>::iterator iter = kv.begin();
+    auto iter = kv.begin();
     cnode_ptr->set_abstract(iter->second);
   } else {
     auto abstract = ParserAttrShape(shape_ref_attr_name, kv);
diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.h b/mindspore/lite/tools/anf_importer/import_from_protobuf.h
index e1417f7c83..3ca2a46cb5 100644
--- a/mindspore/lite/tools/anf_importer/import_from_protobuf.h
+++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.h
@@ -57,17 +57,17 @@ class AnfImporterFromProtobuf : public AnfImporter {
                                 const schema::QuantType &quantType);
   bool BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
                                const CNodePtr &cnode_ptr);
-  bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
-  bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
-                                 const onnx::TensorProto &attr_tensor);
-  ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
-  bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
-                                   const onnx::TensorProto &attr_tensor);
+  static bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto);
+  static bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                        const onnx::TensorProto &attr_tensor);
+  static ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
+  static bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name,
+                                          const onnx::TensorProto &attr_tensor);
   bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto);
   bool ObtainValueNodeInTensorForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
   bool GetAttrValueForValueNode(const std::string &value_node_name, const onnx::AttributeProto &attr_proto);
   bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
-  std::unordered_map<std::string, abstract::AbstractBasePtr> GetAbstractForCNode(
+  static std::unordered_map<std::string, abstract::AbstractBasePtr> GetAbstractForCNode(
       const onnx::AttributeProto &attr_proto);

  private:
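The ObtainValueNodeInTensorForm change pairs new (std::nothrow) with an explicit nullptr check, so an allocation failure is reported through the normal error path instead of an exception; the extra byte appears intended to leave room for a terminator. A hedged sketch of the pattern (CopyTensorBuf, the memcpy, and the terminator write are illustrative assumptions, not lines from the hunk):

    #include <cstring>
    #include <new>
    #include <string>

    // Copy a protobuf raw_data() payload into a freshly allocated buffer.
    // new (std::nothrow) yields nullptr on failure instead of throwing, so
    // allocation errors flow through the caller's status handling.
    char *CopyTensorBuf(const std::string &tensor_buf) {
      auto tensor_data = new (std::nothrow) char[tensor_buf.size() + 1];
      if (tensor_data == nullptr) {
        return nullptr;  // caller logs "Tensor_data is nullptr" and bails out
      }
      std::memcpy(tensor_data, tensor_buf.data(), tensor_buf.size());
      tensor_data[tensor_buf.size()] = '\0';  // assumed use of the extra byte
      return tensor_data;
    }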
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
index 3f4b746a53..29860b45eb 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
@@ -147,7 +147,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
   scaleParam->axis = 0 - shape_size;
   mulNode->inputIndex.push_back(addBiasIndex);
   MS_ASSERT(addNode->primitive != nullptr);
-  MS_ASSERT(addNode->primitive->value != nullptr);
   MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
   auto activationType = addNode->primitive->value.AsAdd()->activationType;
   if (activationType == ActivationType_RELU || activationType == ActivationType_RELU6 ||
@@ -163,7 +162,6 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt
     // repace addnode as activation
     std::unique_ptr<ActivationT> activationParam(new ActivationT());
     MS_ASSERT(addNode->primitive != nullptr);
-    MS_ASSERT(addNode->primitive->value != nullptr);
     MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr);
     activationParam->type = addNode->primitive->value.AsAdd()->activationType;
     addNode->primitive->value.type = schema::PrimitiveType_Activation;
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc
index e7a051f98e..59058ae13a 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc
@@ -19,8 +19,7 @@
 #include
 #include

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 constexpr int32_t kSingleGroup = 1;
 bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx DepthwiseConvParser";
@@ -140,6 +139,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
   }
   std::vector weight_shape;
   auto size = (*nodeIter).dims_size();
+  weight_shape.reserve(size);
   for (int i = 0; i < size; ++i) {
     weight_shape.emplace_back((*nodeIter).dims(i));
   }
@@ -157,7 +157,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
   auto iter = std::find_if((*nodeIter).attribute().begin(), (*nodeIter).attribute().end(),
                            [](const onnx::AttributeProto &attr) { return attr.name() == "shape"; });
   if (iter != (*nodeIter).attribute().end()) {
-    MS_ASSERT(iter->ints() != nullptr);
     MS_ASSERT(iter->ints().begin() != nullptr);
     MS_ASSERT(iter->ints().end() != nullptr);
     dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end());
@@ -188,5 +187,4 @@ OnnxNodeRegistrar g_onnxConvParser("Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvParser("Int8Conv", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxConvReluParser("ConvRelu", new OnnxConvParser());
 OnnxNodeRegistrar g_onnxInt8ConvReluParser("Int8ConvRelu", new OnnxConvParser());
-}  // namespace lite
-}  // namespace mindspore
+}  // namespace mindspore::lite
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h
index ab2e8cf5d0..0162c6ffe4 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h
@@ -31,7 +31,7 @@ class OnnxConvParser : public OnnxNodeParser {
   STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;

  private:
-  bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
+  static bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::CNodeT *op);
 };
 }  // namespace lite
 }  // namespace mindspore
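weight_shape.reserve(size) in the conv parser avoids repeated reallocation: with the capacity requested up front, the following emplace_back loop performs at most one allocation instead of a logarithmic number of growth steps. A small sketch of the same idiom (CopyDims is an illustrative name):

    #include <cstdint>
    #include <vector>

    // Mirrors the weight_shape change: reserve the known element count,
    // then emplace without triggering intermediate reallocations.
    std::vector<int64_t> CopyDims(const int64_t *dims, int size) {
      std::vector<int64_t> shape;
      shape.reserve(size);
      for (int i = 0; i < size; ++i) {
        shape.emplace_back(dims[i]);
      }
      return shape;
    }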
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc
index 6835de0201..bddbfe9bdf 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc
@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lp_norm_parser.h"
 #include

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LpNormParser";
@@ -38,13 +37,12 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
     return RET_NULL_PTR;
   }

-  auto onnx_node_attr = onnx_node.attribute();
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "axis") {
-      attr->axis = onnx_node_attr.at(i).i();
-    } else if (onnx_node_attr.at(i).name() == "p") {
-      attr->p = onnx_node_attr.at(i).i();
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "axis") {
+      attr->axis = onnx_node_attr.i();
+    } else if (attribute_name == "p") {
+      attr->p = onnx_node_attr.i();
     }
   }

@@ -54,5 +52,4 @@ STATUS OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
 }

 OnnxNodeRegistrar g_onnxLpNormParser("LpNormalization", new OnnxLpNormParser());
-}  // namespace lite
-}  // namespace mindspore
+}  // namespace mindspore::lite
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc
index f27ae16415..267abfa8b8 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc
@@ -17,8 +17,7 @@
 #include "tools/converter/parser/onnx/onnx_lrn_parser.h"
 #include

-namespace mindspore {
-namespace lite {
+namespace mindspore::lite {
 STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx LrnParser";
   if (op == nullptr) {
@@ -37,18 +36,17 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
     return RET_NULL_PTR;
   }

-  auto onnx_node_attr = onnx_node.attribute();
   int32_t size = 0;
-  for (int i = 0; i < onnx_node_attr.size(); ++i) {
-    MS_ASSERT(onnx_node_attr.at(i) != nullptr);
-    if (onnx_node_attr.at(i).name() == "alpha") {
-      attr->alpha = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "beta") {
-      attr->beta = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "bias") {
-      attr->bias = onnx_node_attr.at(i).f();
-    } else if (onnx_node_attr.at(i).name() == "size") {
-      size = static_cast<int32_t>(onnx_node_attr.at(i).i());
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "alpha") {
+      attr->alpha = onnx_node_attr.f();
+    } else if (attribute_name == "beta") {
+      attr->beta = onnx_node_attr.f();
+    } else if (attribute_name == "bias") {
+      attr->bias = onnx_node_attr.f();
+    } else if (attribute_name == "size") {
+      size = static_cast<int32_t>(onnx_node_attr.i());
       attr->depth_radius = size / 2;
     }
   }
@@ -66,5 +64,4 @@ STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node

 OnnxNodeRegistrar g_onnxLrnxParser("Lrn", new OnnxLrnParser());
 OnnxNodeRegistrar g_onnxLRNxParser("LRN", new OnnxLrnParser());
-}  // namespace lite
-}  // namespace mindspore
+}  // namespace mindspore::lite
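Both parser rewrites above replace an index loop with a range-for over the repeated attribute field, binding each attribute once as a const reference and dropping the per-iteration .at(i) calls. A self-contained sketch with a stand-in attribute type (AttributeProto here is a simplified struct, not the real protobuf class):

    #include <string>
    #include <vector>

    // Minimal stand-in for a protobuf message exposing name()/f().
    struct AttributeProto {
      std::string name_;
      float f_;
      const std::string &name() const { return name_; }
      float f() const { return f_; }
    };

    // Range-for over the repeated field: each attribute is bound once as a
    // const reference, and the name is fetched a single time per iteration.
    void ParseAttrs(const std::vector<AttributeProto> &attrs, float *alpha, float *beta) {
      for (const auto &attr : attrs) {
        const auto &attribute_name = attr.name();
        if (attribute_name == "alpha") {
          *alpha = attr.f();
        } else if (attribute_name == "beta") {
          *beta = attr.f();
        }
      }
    }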
diff --git a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h
index a96ecf49fc..baac54b342 100644
--- a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h
+++ b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h
@@ -192,22 +192,24 @@ class Calibrator {
   STATUS AddQuantizedOp(const CNodePtr &node);

-  STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS RecordMaxValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);

-  STATUS UpdateDivergInverval(std::unordered_map<std::string, std::unordered_map<std::string, std::unique_ptr<DivergInfo>>> *diverg_info);
+  static STATUS UpdateDivergInverval(
+    std::unordered_map<std::string, std::unordered_map<std::string, std::unique_ptr<DivergInfo>>> *diverg_info);

-  STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
+  static STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);

   void Dump();

   STATUS ComputeThreshold();

-  std::unordered_map<std::string, float> GetScale(
+  static std::unordered_map<std::string, float> GetScale(
       std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

-  std::unordered_map<std::string, int32_t> GetZeropoint(
+  static std::unordered_map<std::string, int32_t> GetZeropoint(
       std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

-  std::map<std::string, MaxMin> GetMinMax(std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);
+  static std::map<std::string, MaxMin> GetMinMax(
+      std::unordered_map<std::string, std::unique_ptr<DivergInfo>> *diverg_info);

   std::unordered_map<std::string, std::unordered_map<std::string, std::unique_ptr<DivergInfo>>> *GetInputDivergInfo();
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index 6af61f5476..20f6c53ae3 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -110,8 +110,9 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
   parameter->set_default_param(param_value);
   return parameter;
 }
-kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs, OpParameter *parameter,
-                                  lite::InnerContext *context, mindspore::lite::PrimitiveC *primitive) {
+kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, const std::vector<Tensor *> &outputs,
+                                  OpParameter *parameter, lite::InnerContext *context,
+                                  mindspore::lite::PrimitiveC *primitive) {
   MS_ASSERT(nullptr != lite_primitive);
   auto data_type = inputs.front()->data_type();
   kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()};
@@ -163,15 +164,15 @@ lite::STATUS ReplaceCNode(const FuncGraphPtr &func_graph, const CNodePtr &any_no
 }  // namespace
 void FreeTensors(std::vector<Tensor *> *input_tensor, std::vector<Tensor *> *output_tensor) {
   if (input_tensor != nullptr) {
-    for (size_t i = 0; i < input_tensor->size(); i++) {
-      delete (*input_tensor)[i];
-      (*input_tensor)[i] = nullptr;
+    for (auto &i : *input_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
   if (output_tensor != nullptr) {
-    for (size_t i = 0; i < output_tensor->size(); i++) {
-      delete (*output_tensor)[i];
-      (*output_tensor)[i] = nullptr;
+    for (auto &i : *output_tensor) {
+      delete i;
+      i = nullptr;
     }
   }
 }
@@ -231,9 +232,9 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
       // here, input_tensor's format need to be transposed nhwc according to fmkType,
       // but for the time being, we only transpose the tensor with 0/1/2/3D.
      // Others should be added in future.
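In FreeTensors, the reference binding in `for (auto &i : *input_tensor)` is what makes `i = nullptr` meaningful: it writes back into the vector, so no stale pointer survives in the container after delete. A minimal sketch (Tensor and FreeTensorsSketch are illustrative):

    #include <vector>

    struct Tensor { /* ... */ };

    // auto & binds each element by reference, so nulling `i` clears the
    // slot in the vector itself rather than a local copy.
    void FreeTensorsSketch(std::vector<Tensor *> *tensors) {
      if (tensors == nullptr) {
        return;
      }
      for (auto &i : *tensors) {
        delete i;
        i = nullptr;
      }
    }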
-      for (size_t j = 0; j < input_tensors.size(); ++j) {
-        input_tensors[j]->SetFormat(schema::Format::Format_NHWC);
-        if (input_tensors[j]->shape().size() == 4) {
+      for (auto &input_tensor : input_tensors) {
+        input_tensor->SetFormat(schema::Format::Format_NHWC);
+        if (input_tensor->shape().size() == 4) {
           MS_LOG(INFO) << "init input_tensor format to nhwc";
         }
       }
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index a841e4d55d..208edf15b0 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -233,14 +233,11 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
     delete[] tmp_weight_data;
     return;
   }
-
-  if (tmp_weight_data != nullptr) {
-    delete[] tmp_weight_data;
-  }
+  delete[] tmp_weight_data;
 }

 const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_num, bool bias_flag,
-                                                 const float *trans_scale, const float *trans_bias) const {
+                                                 const float *trans_scale, const float *trans_bias) {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h
index 2017b0de27..da161c0192 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h
@@ -31,7 +31,7 @@ class ConvTransformFusion : public PatternProcessPass {
   virtual const void InitTransParam(const CNodePtr &, int, float *, float *) const = 0;
   const void GenNewConvTensor(const FuncGraphPtr &, const CNodePtr &, int, const float *, const float *) const;
   const void CalNewWeightTensor(float *, int, int, const float *) const;
-  const void CalNewBiasTensor(float *, int, bool, const float *, const float *) const;
+  static const void CalNewBiasTensor(float *, int, bool, const float *, const float *);
 };
 }  // namespace mindspore::opt
 #endif  // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_
diff --git a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc
index 67f277215a..2493b3345d 100644
--- a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc
@@ -79,7 +79,6 @@ bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) {
     MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveT";
     return RET_ERROR;
   }
-  MS_ASSERT(primT->value != nullptr);
   MS_ASSERT(primT->value.AsTranspose() != nullptr);
   std::vector<int32_t> perm = primT->value.AsTranspose()->perm;
   if (perm == kPermNHWC) {
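The CalNewWeightTensor cleanup relies on a language guarantee: delete and delete[] on a null pointer are defined no-ops in C++, so the removed null check around `delete[] tmp_weight_data` was redundant. A short sketch (ScaleWeightsSketch is an illustrative name):

    #include <new>

    // delete[] on nullptr is a no-op by definition, which is why the
    // `if (tmp_weight_data != nullptr)` wrapper could be dropped above.
    void ScaleWeightsSketch(int kernel_num) {
      auto *tmp_weight_data = new (std::nothrow) float[kernel_num];
      // ... copy/scale work; tmp_weight_data may legitimately be nullptr ...
      delete[] tmp_weight_data;  // safe in both cases
    }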