From 1639952d3f964af43a46b90002f8de632ad81890 Mon Sep 17 00:00:00 2001
From: hangq
Date: Sun, 30 Aug 2020 20:38:39 +0800
Subject: [PATCH] update README & add oom check

---
 mindspore/lite/README.md                      |  6 +--
 mindspore/lite/README_CN.md                   |  2 +-
 mindspore/lite/src/model.cc                   |  3 --
 mindspore/lite/src/ops/primitive_c.cc         | 39 +++++++++++++-----
 .../anf_importer/import_from_meta_graphT.cc   | 39 +++++++++++++++----
 .../anf_importer/import_from_meta_graphT.h    |  2 +-
 mindspore/lite/tools/benchmark/benchmark.h    |  9 +----
 7 files changed, 72 insertions(+), 28 deletions(-)

diff --git a/mindspore/lite/README.md b/mindspore/lite/README.md
index 25a21b7443..1f0b6932ee 100644
--- a/mindspore/lite/README.md
+++ b/mindspore/lite/README.md
@@ -37,11 +37,11 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
    The pre-trained models provided by MindSpore include: [Image Classification](https://download.mindspore.cn/model_zoo/official/lite/) and [Object Detection](https://download.mindspore.cn/model_zoo/official/lite/). More models will be provided in the future.
 
-   MindSpore allows you to retrain pre-trained models to perform other tasks. For example: using a pre-trained image classification model, it can be retrained to recognize new image types. See [Retraining](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html).
+   MindSpore allows you to retrain pre-trained models to perform other tasks.
 
 2. Model converter and optimization
 
-   If you use MindSpore or a third-party model, you need to use [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/converter_tool.html) to convert the model into MindSpore Lite model. The MindSpore Lite model converter tool provides the converter of TensorFlow Lite, Caffe, ONNX to MindSpore Lite model, fusion and quantization could be introduced during convert procedure.
+   If you use a MindSpore or third-party model, you need to run it through the [MindSpore Lite Model Converter Tool](https://www.mindspore.cn/lite/tutorial/en/master/use/converter_tool.html) to produce a MindSpore Lite model. The tool converts TensorFlow Lite, Caffe, and ONNX models to the MindSpore Lite format, and can apply fusion and quantization during conversion.
 
    MindSpore also provides a tool to convert models running on IoT devices.
 
@@ -51,6 +51,6 @@ For more details please check out our [MindSpore Lite Architecture Guide](https:
 
 4. Inference
 
-   Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/zh-CN/master/use/runtime.html) is the process of running input data through the model to get output.
+   Load the model and perform inference. [Inference](https://www.mindspore.cn/lite/tutorial/en/master/use/runtime.html) is the process of running input data through the model to get output.
 
    MindSpore provides a series of pre-trained models that can be deployed on mobile device [example](#TODO).
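For readers following step 4 above, the runtime flow the README describes — load the converted model, compile it into a session, then run input data through it — looks roughly like the sketch below. It assumes the 1.x-era MindSpore Lite C++ API (`mindspore::lite::Model::Import`, `mindspore::session::LiteSession::CreateSession`, `CompileGraph`, `RunGraph`); exact headers and signatures vary by release, so treat it as an illustration rather than the official example.

```cpp
// Illustrative only: assumes the MindSpore Lite 1.x-era C++ API;
// header paths and signatures differ between releases.
#include "include/context.h"
#include "include/lite_session.h"
#include "include/model.h"

int RunInference(const char *model_buf, size_t size) {
  // Import the flatbuffer model from a memory buffer.
  auto *model = mindspore::lite::Model::Import(model_buf, size);
  if (model == nullptr) {
    return -1;  // bad buffer or allocation failure
  }
  mindspore::lite::Context context;  // default CPU context
  auto *session = mindspore::session::LiteSession::CreateSession(&context);
  if (session == nullptr || session->CompileGraph(model) != 0) {
    delete session;
    delete model;
    return -1;
  }
  // Fill each input tensor before running (omitted here), e.g. via
  // session->GetInputs() and the tensor's MutableData().
  int ret = session->RunGraph();
  delete session;
  delete model;
  return ret;
}
```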
diff --git a/mindspore/lite/README_CN.md b/mindspore/lite/README_CN.md
index d2051cae3b..75b880580a 100644
--- a/mindspore/lite/README_CN.md
+++ b/mindspore/lite/README_CN.md
@@ -45,7 +45,7 @@ MindSpore Lite是MindSpore推出的端云协同的、轻量化、高性能AI推
 
    MindSpore提供的预训练模型包括:[图像分类(Image Classification)](https://download.mindspore.cn/model_zoo/official/lite/)和[目标检测(Object Detection)](https://download.mindspore.cn/model_zoo/official/lite/)。后续MindSpore团队会增加更多的预置模型。
 
-   MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。参见[重训练](https://www.mindspore.cn/lite/tutorial/zh-CN/master/advanced_use/retraining_of_quantized_network.html)。
+   MindSpore允许您重新训练预训练模型,以执行其他任务。比如:使用预训练的图像分类模型,可以重新训练来识别新的图像类型。
 
 2. 模型转换/优化
 
diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc
index cdc7c5f1c8..bafb15556b 100644
--- a/mindspore/lite/src/model.cc
+++ b/mindspore/lite/src/model.cc
@@ -33,9 +33,6 @@ class ModelImpl {
   void FreeMetaGraph();
   int BuildOps();
 
- protected:
-  PrimitiveC *CopyPrimitive(const schema::Primitive *src_prim);
-
  protected:
   const char *model_buf_;
   size_t buf_size_;
diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc
index 7bc61f531a..94a8b26527 100644
--- a/mindspore/lite/src/ops/primitive_c.cc
+++ b/mindspore/lite/src/ops/primitive_c.cc
@@ -156,23 +156,50 @@ void PrimitiveC::SetQuantType(schema::QuantType quant_type) { this->quant_type_
 schema::QuantType PrimitiveC::GetQuantType() const { return quant_type_; }
 
 std::shared_ptr<PrimitiveC> GetReturnPrim() {
-  auto return_primitiveT = new schema::PrimitiveT;
+  auto return_primitiveT = new (std::nothrow) schema::PrimitiveT;
+  if (return_primitiveT == nullptr) {
+    MS_LOG(ERROR) << "new PrimitiveT failed";
+    return nullptr;
+  }
   return_primitiveT->value.type = schema::PrimitiveType_Return;
-  return_primitiveT->value.value = new schema::ReturnT;
+  return_primitiveT->value.value = new (std::nothrow) schema::ReturnT;
+  if (return_primitiveT->value.value == nullptr) {
+    MS_LOG(ERROR) << "new ReturnT failed";
+    delete return_primitiveT;
+    return nullptr;
+  }
   return std::make_shared<PrimitiveC>(return_primitiveT);
 }
 
 std::shared_ptr<PrimitiveC> GetMakeTuplePrim() {
-  auto make_tuple_primitiveT = new schema::PrimitiveT;
+  auto make_tuple_primitiveT = new (std::nothrow) schema::PrimitiveT;
+  if (make_tuple_primitiveT == nullptr) {
+    MS_LOG(ERROR) << "new PrimitiveT failed";
+    return nullptr;
+  }
   make_tuple_primitiveT->value.type = schema::PrimitiveType_MakeTuple;
-  make_tuple_primitiveT->value.value = new schema::MakeTupleT;
+  make_tuple_primitiveT->value.value = new (std::nothrow) schema::MakeTupleT;
+  if (make_tuple_primitiveT->value.value == nullptr) {
+    MS_LOG(ERROR) << "new MakeTupleT failed";
+    delete make_tuple_primitiveT;
+    return nullptr;
+  }
   return std::make_shared<PrimitiveC>(make_tuple_primitiveT);
 }
 
 std::shared_ptr<PrimitiveC> GetTupleGetItemPrim() {
-  auto tuple_get_item_primitiveT = new schema::PrimitiveT();
+  auto tuple_get_item_primitiveT = new (std::nothrow) schema::PrimitiveT();
+  if (tuple_get_item_primitiveT == nullptr) {
+    MS_LOG(ERROR) << "new PrimitiveT failed";
+    return nullptr;
+  }
   tuple_get_item_primitiveT->value.type = schema::PrimitiveType_TupleGetItem;
-  tuple_get_item_primitiveT->value.value = new schema::TupleGetItemT;
+  tuple_get_item_primitiveT->value.value = new (std::nothrow) schema::TupleGetItemT;
+  if (tuple_get_item_primitiveT->value.value == nullptr) {
+    MS_LOG(ERROR) << "new TupleGetItemT failed";
+    delete tuple_get_item_primitiveT;
+    return nullptr;
+  }
   return std::make_shared<PrimitiveC>(tuple_get_item_primitiveT);
 }
 
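The OOM checks in the hunk above only fire when the allocation uses `new (std::nothrow)`: a plain `new` reports failure by throwing `std::bad_alloc` rather than returning `nullptr`, so a subsequent null test on its result is dead code. Below is a minimal, self-contained sketch of the check-and-unwind pattern, using hypothetical `PrimitiveT`/`ReturnT` stand-ins rather than the generated flatbuffer schema types:

```cpp
#include <iostream>
#include <memory>
#include <new>  // std::nothrow

// Hypothetical stand-ins for the generated flatbuffer object types.
struct ReturnT {};
struct PrimitiveT {
  ReturnT *value = nullptr;
  ~PrimitiveT() { delete value; }  // owns the nested allocation
};

std::shared_ptr<PrimitiveT> MakeReturnPrim() {
  // nothrow new returns nullptr on OOM instead of throwing std::bad_alloc,
  // which is what makes the explicit null checks meaningful.
  auto *prim = new (std::nothrow) PrimitiveT;
  if (prim == nullptr) {
    std::cerr << "new PrimitiveT failed\n";
    return nullptr;
  }
  prim->value = new (std::nothrow) ReturnT;
  if (prim->value == nullptr) {
    std::cerr << "new ReturnT failed\n";
    delete prim;  // unwind the outer allocation on partial failure
    return nullptr;
  }
  return std::shared_ptr<PrimitiveT>(prim);
}

int main() {
  auto prim = MakeReturnPrim();
  std::cout << (prim != nullptr ? "ok" : "failed") << "\n";
  return 0;
}
```

The key detail is the `delete prim` on the partial-failure path: without it, the outer object leaks whenever the nested allocation fails, which is exactly what the added `delete` calls in the patch guard against.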
diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
index 6d7c6ed883..eaccef248a 100644
--- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
+++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
@@ -112,8 +112,8 @@ abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTe
   return std::make_shared<abstract::AbstractTensor>(type_ptr, shape);
 }
 
-void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
-                                                const CNodePtr &dst_cnode) {
+int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
+                                               const CNodePtr &dst_cnode) {
   MS_ASSERT(nullptr != meta_graph_);
   MS_ASSERT(nullptr != src_cnode);
   MS_ASSERT(nullptr != dst_cnode);
@@ -133,7 +133,12 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CN
     auto tensor = meta_graph_->allTensors.at(out_tensor_id);
     MS_ASSERT(nullptr != tensor);
     abstract_list.emplace_back(ConvertTensorToAbstractTensor(tensor));
-    auto tuple_get_item_prim = NewValueNode(GetTupleGetItemPrim());
+    auto tuple_get_item_prim_ptr = GetTupleGetItemPrim();
+    if (tuple_get_item_prim_ptr == nullptr) {
+      MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr";
+      return RET_ERROR;
+    }
+    auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr);
     auto get_item_value = NewValueNode(MakeValue(i));
     std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
     CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
@@ -142,6 +147,7 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CN
     }
     dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list));
   }
+  return RET_OK;
 }
 
 int AnfImporterFromMetaGraphT::ConverterCNode() {
@@ -161,7 +167,11 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
     }
     auto new_cnode = func_graph_->NewCNode(op_inputs);
     new_cnode->set_fullname_with_scope(cNode->name);
-    ConvertAbstract(cNode, new_cnode);
+    auto ret = ConvertAbstract(cNode, new_cnode);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "ConvertAbstract failed.";
+      return RET_ERROR;
+    }
   }
   return RET_OK;
 }
@@ -171,7 +181,12 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
   MS_EXCEPTION_IF_NULL(func_graph_);
   if (meta_graph_->outputIndex.size() > 1) {
     std::vector<AnfNodePtr> make_tuple_inputs;
-    auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
+    auto make_tuple_prim_ptr = GetMakeTuplePrim();
+    if (make_tuple_prim_ptr == nullptr) {
+      MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr";
+      return RET_ERROR;
+    }
+    auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr);
     make_tuple_inputs.emplace_back(make_tuple_prim);
     for (auto tensor_id : meta_graph_->outputIndex) {
       auto cNode = GetNode(tensor_id);
@@ -185,14 +200,24 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
     make_tuple_cnode->set_fullname_with_scope("return tuple");
 
     std::vector<AnfNodePtr> op_inputs;
-    auto value_node = NewValueNode(GetReturnPrim());
+    auto return_prim_ptr = GetReturnPrim();
+    if (return_prim_ptr == nullptr) {
+      MS_LOG(ERROR) << "GetReturnPrim return nullptr";
+      return RET_ERROR;
+    }
+    auto value_node = NewValueNode(return_prim_ptr);
     op_inputs.emplace_back(value_node);
     op_inputs.emplace_back(make_tuple_cnode);
     auto cnode = func_graph_->NewCNode(op_inputs);
     cnode->set_fullname_with_scope("return");
     func_graph_->set_return(cnode);
   } else {
-    auto value_node = NewValueNode(GetReturnPrim());
+    auto return_prim_ptr = GetReturnPrim();
+    if (return_prim_ptr == nullptr) {
+      MS_LOG(ERROR) << "GetReturnPrim return nullptr";
+      return RET_ERROR;
+    }
+    auto value_node = NewValueNode(return_prim_ptr);
     std::vector<AnfNodePtr> op_inputs{value_node};
     auto cnode = GetNode(meta_graph_->outputIndex.front());
     if (nullptr == cnode) {
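The importer changes above all follow one convention: a helper that could previously fail silently (`void ConvertAbstract`) now returns a status code, and every call site checks that code and propagates failure upward instead of continuing with a half-built graph node. A condensed sketch of that shape, with `RET_OK`/`RET_ERROR` defined locally to mirror the constants used in the diff and a hypothetical `BuildPrim` standing in for fallible helpers like `GetTupleGetItemPrim()`:

```cpp
#include <iostream>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = 1;

// Hypothetical fallible helper: returns false to simulate allocation failure.
bool BuildPrim(bool simulate_oom) { return !simulate_oom; }

// Before the patch this kind of function returned void and failures vanished;
// returning a status code lets callers stop early.
int ConvertAbstract(bool simulate_oom) {
  if (!BuildPrim(simulate_oom)) {
    std::cerr << "BuildPrim failed" << std::endl;
    return RET_ERROR;
  }
  return RET_OK;
}

int ConverterCNode(bool simulate_oom) {
  // Check and propagate, mirroring the call-site pattern in the diff.
  auto ret = ConvertAbstract(simulate_oom);
  if (ret != RET_OK) {
    std::cerr << "ConvertAbstract failed." << std::endl;
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  std::cout << "ok path: " << ConverterCNode(false)
            << ", oom path: " << ConverterCNode(true) << std::endl;
  return 0;
}
```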
diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
index bed226c4d4..960c14a09f 100644
--- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
+++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h
@@ -41,7 +41,7 @@ class AnfImporterFromMetaGraphT : public AnfImporter {
   ValueNodePtr ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode);
   abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor);
 
-  void ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode);
+  int ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode);
 
   int AddReturnCNode() override;
 
diff --git a/mindspore/lite/tools/benchmark/benchmark.h b/mindspore/lite/tools/benchmark/benchmark.h
index 0df9f5424b..f8dfdd1e44 100644
--- a/mindspore/lite/tools/benchmark/benchmark.h
+++ b/mindspore/lite/tools/benchmark/benchmark.h
@@ -54,8 +54,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // common
     AddFlag(&BenchmarkFlags::modelPath, "modelPath", "Input model path", "");
     AddFlag(&BenchmarkFlags::inDataPath, "inDataPath", "Input data path, if not set, use random input", "");
-    AddFlag(&BenchmarkFlags::inDataTypeIn, "inDataType", "Input data type. img | bin", "bin");
-    AddFlag(&BenchmarkFlags::omModelPath, "omModelPath", "OM model path, only required when device is NPU", "");
     AddFlag(&BenchmarkFlags::device, "device", "CPU | GPU", "CPU");
     AddFlag(&BenchmarkFlags::cpuBindMode, "cpuBindMode",
             "Input -1 for MID_CPU, 1 for HIGHER_CPU, 0 for NO_BIND, default value: 1", 1);
@@ -67,8 +65,6 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
     // MarkAccuracy
     AddFlag(&BenchmarkFlags::calibDataPath, "calibDataPath", "Calibration data file path", "");
     AddFlag(&BenchmarkFlags::accuracyThreshold, "accuracyThreshold", "Threshold of accuracy", 0.5);
-    // Resize
-    AddFlag(&BenchmarkFlags::resizeDimsIn, "resizeDims", "Dims to resize to", "");
   }
 
   ~BenchmarkFlags() override = default;
@@ -83,7 +79,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string inDataPath;
   std::vector<std::string> input_data_list;
   InDataType inDataType;
-  std::string inDataTypeIn;
+  std::string inDataTypeIn = "bin";
   int cpuBindMode = 1;
   // MarkPerformance
   int loopCount;
@@ -94,10 +90,9 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
   std::string calibDataPath;
   float accuracyThreshold;
   // Resize
-  std::string resizeDimsIn;
+  std::string resizeDimsIn = "";
   std::vector<std::vector<int>> resizeDims;
 
-  std::string omModelPath;
   std::string device;
 };
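The `benchmark.h` hunks pair two ideas: the `inDataType` and `resizeDims` flag registrations are removed, and the backing members `inDataTypeIn` and `resizeDimsIn` pick up their former defaults as in-class default member initializers, so they hold sensible values even when no flag is ever parsed. A small sketch of that idiom; the `Flags` class here is a hypothetical stand-in, not the real `FlagParser` machinery:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for the real FlagParser-derived flags class.
class Flags {
 public:
  // In-class default member initializers: the members already carry their
  // defaults at construction, which is why the AddFlag registrations that
  // used to supply these defaults can be deleted without leaving the
  // members uninitialized.
  std::string inDataTypeIn = "bin";
  std::string resizeDimsIn = "";
};

int main() {
  Flags flags;  // no flag parsing performed
  std::cout << "inDataType default: " << flags.inDataTypeIn << "\n";
  std::cout << "resizeDims default: '" << flags.resizeDimsIn << "'\n";
  return 0;
}
```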