From e19a3e39267054ca41047591f0da1915c9624835 Mon Sep 17 00:00:00 2001 From: hangq Date: Wed, 21 Oct 2020 14:23:20 +0800 Subject: [PATCH] remove unused Prepare calling in operator & add CreateSession(const char *model_buf, size_t size, lite::Context *context) interface --- build.sh | 27 ++-- mindspore/lite/include/lite_session.h | 11 +- mindspore/lite/src/CMakeLists.txt | 1 + mindspore/lite/src/lite_session.cc | 44 +++++- mindspore/lite/src/lite_session.h | 4 +- mindspore/lite/src/model.cc | 118 +-------------- mindspore/lite/src/model_common.cc | 138 ++++++++++++++++++ mindspore/lite/src/model_common.h | 29 ++++ .../src/runtime/kernel/arm/fp16/crop_fp16.cc | 1 + .../arm/fp16/deconvolution_winograd_fp16.cc | 6 - .../kernel/arm/fp16/quant_dtype_cast_fp16.cc | 6 - .../kernel/arm/fp32/deconvolution_winograd.cc | 5 - .../runtime/kernel/arm/fp32/lsh_projection.cc | 8 +- .../src/runtime/kernel/arm/fp32/skip_gram.cc | 7 +- .../src/runtime/kernel/arm/fp32_grad/adam.cc | 6 - .../kernel/arm/fp32_grad/apply_momentum.cc | 6 - .../kernel/arm/fp32_grad/arithmetic_grad.cc | 5 - .../runtime/kernel/arm/fp32_grad/assign.cc | 6 - .../runtime/kernel/arm/fp32_grad/bias_grad.cc | 5 - .../runtime/kernel/arm/fp32_grad/bn_grad.cc | 6 - .../kernel/arm/fp32_grad/convolution.cc | 5 - .../arm/fp32_grad/convolution_grad_filter.cc | 5 - .../arm/fp32_grad/convolution_grad_input.cc | 6 - .../fp32_grad/deconvolution_grad_filter.cc | 6 - .../kernel/arm/fp32_grad/pooling_grad.cc | 6 - .../kernel/arm/fp32_grad/power_grad.cc | 5 - .../src/runtime/kernel/arm/fp32_grad/sgd.cc | 6 - .../softmax_cross_entropy_with_logits.cc | 6 - .../kernel/arm/fp32_grad/softmax_grad.cc | 6 - ...parse_softmax_cross_entropy_with_logits.cc | 5 - .../kernel/arm/fp32_grad/tuple_getitem.cc | 5 - .../int8/convolution_depthwise_3x3_int8.cc | 7 +- .../runtime/kernel/arm/string/normalize.cc | 5 - .../src/runtime/kernel/arm/string/predict.cc | 5 - .../kernel/opencl/subgraph_opencl_kernel.h | 1 + mindspore/lite/src/scheduler.cc | 3 - 
mindspore/lite/src/sub_graph_kernel.h | 2 + mindspore/lite/test/CMakeLists.txt | 1 + mindspore/lite/tools/converter/CMakeLists.txt | 1 + 39 files changed, 250 insertions(+), 275 deletions(-) create mode 100644 mindspore/lite/src/model_common.cc create mode 100644 mindspore/lite/src/model_common.h diff --git a/build.sh b/build.sh index 65f5140917..6d01289597 100755 --- a/build.sh +++ b/build.sh @@ -266,12 +266,15 @@ checkopts() COMPILE_LITE="on" if [[ "$OPTARG" == "arm64" ]]; then ENABLE_CONVERTER="off" + RUN_TESTCASES="on" LITE_PLATFORM="arm64" elif [[ "$OPTARG" == "arm32" ]]; then ENABLE_CONVERTER="off" + RUN_TESTCASES="on" LITE_PLATFORM="arm32" elif [[ "$OPTARG" == "x86_64" ]]; then ENABLE_CONVERTER="on" + RUN_TESTCASES="on" LITE_PLATFORM="x86_64" else echo "-I parameter must be arm64、arm32 or x86_64" @@ -315,7 +318,7 @@ checkopts() elif [[ "$OPTARG" == "object-c" ]]; then LITE_LANGUAGE="object-c" else - echo "-A parameter must be cpp、java or object-c" + echo "-A parameter must be cpp, java or object-c" exit 1 fi ;; @@ -628,9 +631,9 @@ build_minddata_lite_deps() } get_version() { - VERSION_MAJOR=`grep "const int ms_version_major =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]"` - VERSION_MINOR=`grep "const int ms_version_minor =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]"` - VERSION_REVISION=`grep "const int ms_version_revision =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]"` + VERSION_MAJOR=$(grep "const int ms_version_major =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]") + VERSION_MINOR=$(grep "const int ms_version_minor =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]") + VERSION_REVISION=$(grep "const int ms_version_revision =" ${BASEPATH}/mindspore/lite/include/version.h | tr -dc "[0-9]") VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION} } @@ -642,7 +645,9 @@ build_lite() echo "start build opencl" build_opencl fi - build_gtest + if [ "${RUN_TESTCASES}" == 
"on" ]; then + build_gtest + fi if [ "${COMPILE_MINDDATA_LITE}" == "lite" ] || [ "${COMPILE_MINDDATA_LITE}" == "full" ]; then build_minddata_lite_deps @@ -665,7 +670,7 @@ build_lite() -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="arm64-v8a" -DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \ -DANDROID_STL="c++_static" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \ -DPLATFORM_ARM64=on -DENABLE_NEON=on -DENABLE_FP16="off" \ - -DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=on \ + -DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \ -DSUPPORT_GPU=${ENABLE_GPU} -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \ -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp -DMS_VERSION_MAJOR=${VERSION_MAJOR} \ -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} -DENABLE_VERBOSE=${ENABLE_VERBOSE} \ @@ -676,14 +681,14 @@ build_lite() -DANDROID_NDK="${ANDROID_NDK}" -DANDROID_ABI="armeabi-v7a" -DANDROID_TOOLCHAIN_NAME="clang" \ -DANDROID_STL="c++_static" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \ -DPLATFORM_ARM32=on -DENABLE_NEON=on -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \ - -DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=on \ + -DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \ -DSUPPORT_GPU=${ENABLE_GPU} -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \ -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp -DMS_VERSION_MAJOR=${VERSION_MAJOR} \ -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} -DENABLE_VERBOSE=${ENABLE_VERBOSE} \ "${BASEPATH}/mindspore/lite" else cmake -DPLATFORM_ARM64=off -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \ - -DENABLE_TOOLS=${ENABLE_TOOLS} -DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=on \ + -DENABLE_TOOLS=${ENABLE_TOOLS} 
-DENABLE_CONVERTER=${ENABLE_CONVERTER} -DBUILD_TESTCASES=${RUN_TESTCASES} \ -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${ENABLE_GPU} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \ -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp \ -DMS_VERSION_MAJOR=${VERSION_MAJOR} -DMS_VERSION_MINOR=${VERSION_MINOR} -DMS_VERSION_REVISION=${VERSION_REVISION} \ @@ -718,8 +723,8 @@ build_lite_java_arm64() { cd ${BASEPATH}/output/ rm -rf mindspore-lite-${VERSION_STR}-runtime-arm64-cpu tar -zxvf mindspore-lite-${VERSION_STR}-runtime-arm64-cpu.tar.gz + [ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/arm64-v8a/ mkdir -p ${JAVA_PATH}/java/app/libs/arm64-v8a/ - [ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/arm64-v8a/* cp ${BASEPATH}/output/mindspore-lite-${VERSION_STR}-runtime-arm64-cpu/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/arm64-v8a/ cp ${BASEPATH}/output/mindspore-lite-${VERSION_STR}-runtime-arm64-cpu/lib/libmindspore-lite-fp16.so ${JAVA_PATH}/java/app/libs/arm64-v8a/ cp ${BASEPATH}/output/mindspore-lite-${VERSION_STR}-runtime-arm64-cpu/lib/libmindspore-lite-optimize.so ${JAVA_PATH}/java/app/libs/arm64-v8a/ @@ -738,10 +743,10 @@ build_lite_java_arm32() { fi # copy arm32 so cd ${BASEPATH}/output/ - rm -rf mindspore-lite-${VERSION_STR}runtime-arm32-cpu + rm -rf mindspore-lite-${VERSION_STR}-runtime-arm32-cpu tar -zxvf mindspore-lite-${VERSION_STR}-runtime-arm32-cpu.tar.gz + [ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/armeabi-v7a/ mkdir -p ${JAVA_PATH}/java/app/libs/armeabi-v7a/ - [ -n "${JAVA_PATH}" ] && rm -rf ${JAVA_PATH}/java/app/libs/armeabi-v7a/* cp ${BASEPATH}/output/mindspore-lite-${VERSION_STR}-runtime-arm32-cpu/lib/libmindspore-lite.so ${JAVA_PATH}/java/app/libs/armeabi-v7a/ [ -n "${VERSION_STR}" ] && rm -rf mindspore-lite-${VERSION_STR}-runtime-arm32-cpu } diff --git a/mindspore/lite/include/lite_session.h b/mindspore/lite/include/lite_session.h index 2139fb9e1e..e2eed86a7b 100644 --- 
a/mindspore/lite/include/lite_session.h +++ b/mindspore/lite/include/lite_session.h @@ -35,7 +35,16 @@ class MS_API LiteSession { /// \param[in] context Define the context of session to be created. /// /// \return Pointer of MindSpore Lite LiteSession. - static LiteSession *CreateSession(lite::Context *context); + static LiteSession *CreateSession(const lite::Context *context); + + /// \brief Static method to create a LiteSession pointer which has already compiled a model. + /// + /// \param[in] model_buf Define the buffer read from a model file. + /// \param[in] size Define bytes number of model buffer. + /// \param[in] context Define the context of session to be created. + /// + /// \return Pointer of MindSpore Lite LiteSession. + static LiteSession *CreateSession(const char *model_buf, size_t size, const lite::Context *context); /// \brief Destructor of MindSpore Lite LiteSession. virtual ~LiteSession() = default; diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index dd815f6a01..474868e5ee 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -27,6 +27,7 @@ set(LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/tensor.cc ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc ${CMAKE_CURRENT_SOURCE_DIR}/inner_context.cc + ${CMAKE_CURRENT_SOURCE_DIR}/model_common.cc ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc ${CMAKE_CURRENT_SOURCE_DIR}/sub_graph_kernel.cc diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc index d0ace7304a..53f8cce585 100644 --- a/mindspore/lite/src/lite_session.cc +++ b/mindspore/lite/src/lite_session.cc @@ -26,6 +26,7 @@ #include "src/common/utils.h" #include "src/common/graph_util.h" #include "src/kernel_registry.h" +#include "src/model_common.h" namespace mindspore { namespace lite { @@ -284,6 +285,12 @@ int LiteSession::CompileGraph(Model *model) { return ret; } ret = executor->Prepare(this->kernels_); + if (ret != RET_OK) 
{ + MS_LOG(ERROR) << "Prepare executor failed: " << ret; + is_running_.store(false); + return ret; + } + ret = PrepareKernels(); if (ret != RET_OK) { MS_LOG(ERROR) << "Prepare kernels failed: " << ret; is_running_.store(false); @@ -293,6 +300,17 @@ int LiteSession::CompileGraph(Model *model) { return RET_OK; } +int LiteSession::PrepareKernels() { + for (auto kernel : this->kernels_) { + auto ret = kernel->Prepare(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Prepare kernel " << kernel->name() << " failed: " << ret; + return ret; + } + } + return RET_OK; +} + std::vector LiteSession::GetInputs() const { return this->input_vec_; } int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) { @@ -312,7 +330,7 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af return ret; } -int LiteSession::Init(Context *context) { +int LiteSession::Init(const Context *context) { bool expected = false; if (!is_running_.compare_exchange_strong(expected, true)) { MS_LOG(ERROR) << "Not support multi-threading"; @@ -508,7 +526,7 @@ int LiteSession::Resize(const std::vector &inputs } } // namespace lite -session::LiteSession *session::LiteSession::CreateSession(lite::Context *context) { +session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) { auto session = new lite::LiteSession(); auto ret = session->Init(context); if (ret != mindspore::lite::RET_OK) { @@ -518,4 +536,26 @@ session::LiteSession *session::LiteSession::CreateSession(lite::Context *context } return session; } + +session::LiteSession *session::LiteSession::CreateSession(const char *model_buf, size_t size, + const lite::Context *context) { + auto *session = LiteSession::CreateSession(context); + if (session == nullptr) { + MS_LOG(ERROR) << "Create session failed"; + return nullptr; + } + auto *model = lite::ImportFromBuffer(model_buf, size, true); + if (model == nullptr) { + MS_LOG(ERROR) << "Import model failed"; + return nullptr; + } 
+ auto ret = session->CompileGraph(model); + if (ret != lite::RET_OK) { + MS_LOG(ERROR) << "Compile model failed"; + return nullptr; + } + model->buf = nullptr; + delete (model); + return session; +} } // namespace mindspore diff --git a/mindspore/lite/src/lite_session.h b/mindspore/lite/src/lite_session.h index a213305c54..b6aae2b089 100644 --- a/mindspore/lite/src/lite_session.h +++ b/mindspore/lite/src/lite_session.h @@ -42,7 +42,7 @@ class LiteSession : public session::LiteSession { ~LiteSession() override; - virtual int Init(Context *context); + virtual int Init(const Context *context); void BindThread(bool if_bind) override; @@ -86,6 +86,8 @@ class LiteSession : public session::LiteSession { int ResizeInputs(const std::vector &inputs, const std::vector> &dims); + int PrepareKernels(); + private: void ResetInputsShape(const std::vector> &dims); diff --git a/mindspore/lite/src/model.cc b/mindspore/lite/src/model.cc index 644cb5da32..479035dc81 100644 --- a/mindspore/lite/src/model.cc +++ b/mindspore/lite/src/model.cc @@ -16,124 +16,10 @@ #include "src/ops/primitive_c.h" #include "include/model.h" #include "src/common/log_adapter.h" -#include "include/errorcode.h" -#include "src/common/graph_util.h" -#include "include/version.h" -#include "src/ops/ops_register.h" +#include "src/model_common.h" namespace mindspore::lite { - -bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) { - for (size_t i = 0; i < meta_graph->nodes()->size(); ++i) { - Model::Node *node = new (std::nothrow) Model::Node(); - if (node == nullptr) { - MS_LOG(ERROR) << "new node fail!"; - return false; - } - auto c_node = meta_graph->nodes()->GetAs(i); - auto src_prim = c_node->primitive(); -#ifdef PRIMITIVE_WRITEABLE - node->primitive_ = PrimitiveC::Create(const_cast(src_prim)); -#else - auto primitive = const_cast(src_prim); - node->primitive_ = OpsRegistry::GetInstance()->getPrimitiveCreator(primitive->value_type())(primitive); -#endif - if (node->primitive_ == nullptr) { - 
MS_LOG(ERROR) << "unpack primitive == nullptr!"; - delete node; - return false; - } - node->primitive_->SetQuantType(c_node->quantType()); - node->name_ = c_node->name()->c_str(); - node->node_type_ = c_node->nodeType(); - auto count = c_node->inputIndex()->size(); - for (uint32_t j = 0; j < count; ++j) { - node->input_indices_.push_back(size_t(c_node->inputIndex()->GetAs(j))); - } - if (c_node->outputIndex() != nullptr) { - count = c_node->outputIndex()->size(); - for (uint32_t j = 0; j < count; ++j) { - node->output_indices_.push_back(size_t(c_node->outputIndex()->GetAs(j))); - } - } - model->nodes_.push_back(node); - } - return true; -} - -bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model) { - auto tensor_count = meta_graph->allTensors()->size(); - for (uint32_t i = 0; i < tensor_count; ++i) { - auto *tensor = meta_graph->allTensors()->GetAs(i); - if (tensor == nullptr) { - MS_LOG(ERROR) << i << "th tensor in model is nullptr"; - return false; - } - model->all_tensors_.push_back(const_cast(tensor)); - } - return true; -} - -Model *Model::Import(const char *model_buf, size_t size) { - if (model_buf == nullptr) { - MS_LOG(ERROR) << "The model buf is nullptr"; - return nullptr; - } - flatbuffers::Verifier verify((const uint8_t *)model_buf, size); - if (!schema::VerifyMetaGraphBuffer(verify)) { - MS_LOG(ERROR) << "The buffer is invalid and fail to create graph."; - return nullptr; - } - auto *model = new (std::nothrow) Model(); - if (model == nullptr) { - MS_LOG(ERROR) << "new model fail!"; - return nullptr; - } - model->buf = reinterpret_cast(malloc(size)); - if (model->buf == nullptr) { - MS_LOG(ERROR) << "new inner model buf fail!"; - delete (model); - return nullptr; - } - memcpy(model->buf, model_buf, size); - auto meta_graph = schema::GetMetaGraph(model->buf); - if (meta_graph == nullptr) { - MS_LOG(ERROR) << "meta_graph is nullptr!"; - delete (model); - return nullptr; - } - - if (meta_graph->name() != nullptr) { - model->name_ = 
meta_graph->name()->c_str(); - } - if (meta_graph->version() != nullptr) { - model->version_ = meta_graph->version()->c_str(); - } - - if (model->version_ != Version()) { - MS_LOG(WARNING) << "model version is " << model->version_ << ", inference version is " << Version() << " not equal"; - } - - auto in_count = meta_graph->inputIndex()->size(); - for (uint32_t i = 0; i < in_count; ++i) { - model->input_indices_.push_back(size_t(meta_graph->inputIndex()->GetAs(i))); - } - - auto out_count = meta_graph->outputIndex()->size(); - for (uint32_t i = 0; i < out_count; ++i) { - model->output_indices_.push_back(size_t(meta_graph->outputIndex()->GetAs(i))); - } - if (!ConvertNodes(meta_graph, model)) { - delete model; - return nullptr; - } - - if (!ConvertTensors(meta_graph, model)) { - delete model; - return nullptr; - } - return model; -} +Model *Model::Import(const char *model_buf, size_t size) { return ImportFromBuffer(model_buf, size, false); } void Model::Free() { if (this->buf != nullptr) { diff --git a/mindspore/lite/src/model_common.cc b/mindspore/lite/src/model_common.cc new file mode 100644 index 0000000000..92ed8c76fe --- /dev/null +++ b/mindspore/lite/src/model_common.cc @@ -0,0 +1,138 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/model_common.h" +#include "include/version.h" +#include "src/ops/ops_register.h" + +namespace mindspore::lite { +bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model) { + for (size_t i = 0; i < meta_graph->nodes()->size(); ++i) { + Model::Node *node = new (std::nothrow) Model::Node(); + if (node == nullptr) { + MS_LOG(ERROR) << "new node fail!"; + return false; + } + auto c_node = meta_graph->nodes()->GetAs(i); + auto src_prim = c_node->primitive(); +#ifdef PRIMITIVE_WRITEABLE + node->primitive_ = PrimitiveC::Create(const_cast(src_prim)); +#else + auto primitive = const_cast(src_prim); + node->primitive_ = OpsRegistry::GetInstance()->getPrimitiveCreator(primitive->value_type())(primitive); +#endif + if (node->primitive_ == nullptr) { + MS_LOG(ERROR) << "unpack primitive == nullptr!"; + delete node; + return false; + } + node->primitive_->SetQuantType(c_node->quantType()); + node->name_ = c_node->name()->c_str(); + node->node_type_ = c_node->nodeType(); + auto count = c_node->inputIndex()->size(); + for (uint32_t j = 0; j < count; ++j) { + node->input_indices_.push_back(size_t(c_node->inputIndex()->GetAs(j))); + } + if (c_node->outputIndex() != nullptr) { + count = c_node->outputIndex()->size(); + for (uint32_t j = 0; j < count; ++j) { + node->output_indices_.push_back(size_t(c_node->outputIndex()->GetAs(j))); + } + } + model->nodes_.push_back(node); + } + return true; +} + +bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model) { + auto tensor_count = meta_graph->allTensors()->size(); + for (uint32_t i = 0; i < tensor_count; ++i) { + auto *tensor = meta_graph->allTensors()->GetAs(i); + if (tensor == nullptr) { + MS_LOG(ERROR) << i << "th tensor in model is nullptr"; + return false; + } + model->all_tensors_.push_back(const_cast(tensor)); + } + return true; +} + +Model *ImportFromBuffer(const char *model_buf, size_t size, bool take_buf) { + if (model_buf == nullptr) { + MS_LOG(ERROR) << "The model buf is nullptr"; 
+ return nullptr; + } + flatbuffers::Verifier verify((const uint8_t *)model_buf, size); + if (!schema::VerifyMetaGraphBuffer(verify)) { + MS_LOG(ERROR) << "The buffer is invalid and fail to create graph."; + return nullptr; + } + auto *model = new (std::nothrow) Model(); + if (model == nullptr) { + MS_LOG(ERROR) << "new model fail!"; + return nullptr; + } + if (take_buf) { + model->buf = const_cast(model_buf); + } else { + model->buf = reinterpret_cast(malloc(size)); + if (model->buf == nullptr) { + MS_LOG(ERROR) << "new inner model buf fail!"; + delete (model); + return nullptr; + } + memcpy(model->buf, model_buf, size); + } + + auto meta_graph = schema::GetMetaGraph(model->buf); + if (meta_graph == nullptr) { + MS_LOG(ERROR) << "meta_graph is nullptr!"; + delete (model); + return nullptr; + } + + if (meta_graph->name() != nullptr) { + model->name_ = meta_graph->name()->c_str(); + } + if (meta_graph->version() != nullptr) { + model->version_ = meta_graph->version()->c_str(); + } + + if (model->version_ != Version()) { + MS_LOG(WARNING) << "model version is " << model->version_ << ", inference version is " << Version() << " not equal"; + } + + auto in_count = meta_graph->inputIndex()->size(); + for (uint32_t i = 0; i < in_count; ++i) { + model->input_indices_.push_back(size_t(meta_graph->inputIndex()->GetAs(i))); + } + + auto out_count = meta_graph->outputIndex()->size(); + for (uint32_t i = 0; i < out_count; ++i) { + model->output_indices_.push_back(size_t(meta_graph->outputIndex()->GetAs(i))); + } + if (!ConvertNodes(meta_graph, model)) { + delete model; + return nullptr; + } + + if (!ConvertTensors(meta_graph, model)) { + delete model; + return nullptr; + } + return model; +} +} // namespace mindspore::lite diff --git a/mindspore/lite/src/model_common.h b/mindspore/lite/src/model_common.h new file mode 100644 index 0000000000..52113e7a79 --- /dev/null +++ b/mindspore/lite/src/model_common.h @@ -0,0 +1,29 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + 
* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_MODEL_COMMON_H_ +#define MINDSPORE_LITE_SRC_MODEL_COMMON_H_ +#include "src/ops/primitive_c.h" +#include "include/model.h" + +namespace mindspore::lite { +bool ConvertNodes(const schema::MetaGraph *meta_graph, Model *model); + +bool ConvertTensors(const schema::MetaGraph *meta_graph, Model *model); + +Model *ImportFromBuffer(const char *model_buf, size_t size, bool take_buf); +} // namespace mindspore::lite +#endif // MINDSPORE_LITE_SRC_MODEL_COMMON_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc index 8dd153dd0c..7e49cf7515 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.cc @@ -77,6 +77,7 @@ int CropFp16CPUKernel::Run() { auto ret = ParallelLaunch(this->context_->thread_pool_, CropFp16Run, this, thread_count_); if (ret != RET_OK) { MS_LOG(ERROR) << "ParallelLaunch failed: " << ret; + FreeInputAndOutput(); return ret; } if (out_tensors_.at(kOutputIndex)->data_type() == kNumberTypeFloat32) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc index 2ac3aef264..88e78b9b4c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc @@ -280,12 +280,6 @@ int DeConvWinogradFp16CPUKernel::Init() { } int DeConvWinogradFp16CPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - ConvolutionBaseFP16CPUKernel::GetExecuteTensor(); for (int batch_index = 0; batch_index < conv_param_->input_batch_; batch_index++) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc index 0df73c6ffa..562c1c9790 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc @@ -113,12 +113,6 @@ int QuantDTypeCastRun(void *cdata, int task_id) { } int QuantDTypeCastFp16CPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - if (in_tensors_[0]->data_type() == TypeId::kNumberTypeInt8 && out_tensors_[0]->data_type() == TypeId::kNumberTypeFloat16) { int8_ptr_ = reinterpret_cast(in_tensors_[0]->data_c()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc index 71b8098cad..fe3dddd439 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc @@ -330,11 +330,6 @@ int DeConvolutionWinogradCPUKernel::DeDeconvPost(int task_id) { } int DeConvolutionWinogradCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } float *src_in = reinterpret_cast(in_tensors_[0]->data_c()); float *src_out = reinterpret_cast(out_tensors_[0]->data_c()); diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc index ffd3fd865a..bde9294ffa 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection.cc @@ -38,12 +38,6 @@ int LshProjectionCPUKernel::Init() { int LshProjectionCPUKernel::ReSize() { return RET_OK; } int LshProjectionCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << ret; - return ret; - } - auto input_tensor0 = in_tensors_.at(0); auto input_tensor1 = in_tensors_.at(1); auto out_tensor0 = out_tensors_.at(0); @@ -65,7 +59,7 @@ int LshProjectionCPUKernel::Run() { elements_num_ = input_tensor0->DimensionSize(0); count_unit_ = thread_num_ > 1 ? UP_DIV(elements_num_, thread_num_) : elements_num_; - ret = ParallelLaunch(this->context_->thread_pool_, LshProjectionRun, this, thread_num_); + auto ret = ParallelLaunch(this->context_->thread_pool_, LshProjectionRun, this, thread_num_); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc index c482cb5e4a..da9ee88467 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram.cc @@ -60,11 +60,6 @@ void ParseSentenceToWords(const StringPack &sentence, std::vector *w } int SkipGramCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << ret; - return ret; - } skip_gram_parameter_ = reinterpret_cast(op_parameter_); if (skip_gram_parameter_->ngram_size < 1) { MS_LOG(ERROR) << "Skip Gram Parameter Error, NgramSize should be at least 1, get " @@ -105,7 +100,7 @@ int SkipGramCPUKernel::Run() { index--; } } - ret = mindspore::lite::WriteSeperatedStringsToTensor(out_tensors_[0], result); + auto ret = mindspore::lite::WriteSeperatedStringsToTensor(out_tensors_[0], 
result); return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc index 7956e571cc..1b152a27bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc @@ -79,12 +79,6 @@ int AdamRun(void *cdata, int task_id) { } int AdamCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "AdamCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, AdamRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Adam function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc index 9cc9e85d5a..488cf8ac1f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc @@ -65,12 +65,6 @@ int ApplyMomentumRun(void *cdata, int task_id) { } int ApplyMomentumCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "ApplyMomentumCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, ApplyMomentumRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Apply Momentum function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc index 1a89ae99ed..e847c3cf64 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc @@ -202,11 +202,6 @@ int ArithmeticGradRun(void *cdata, int task_id) { } int ArithmeticGradCPUKernel::Run() { - auto ret = 
Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "ArithmeticGradCPUKernel Prepare failed."; - return ret; - } int error_code = ParallelLaunch(this->context_->thread_pool_, ArithmeticGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Arithmetic Grad function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc index 862c203b43..1984ca1a01 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc @@ -52,12 +52,6 @@ int AssignRun(void *cdata, int task_id) { } int AssignCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "AssignCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, AssignRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "Assign function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc index 5e60a330ee..16d9dd15ad 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc @@ -76,11 +76,6 @@ int BiasGradRun(void *cdata, int task_id) { } int BiasGradCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "BiasGradCPUKernel Prepare failed."; - return RET_ERROR; - } int error_code = ParallelLaunch(this->context_->thread_pool_, BiasGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc index 1db1a0d084..c821003cec 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc @@ -88,12 +88,6 @@ int BNGradRun(void *cdata, int task_id) { } int BNGradCPUKernel::Run() { - // std::cout << "run succ" << std::endl; - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "BNGradCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } int error_code = ParallelLaunch(this->context_->thread_pool_, BNGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "BN function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc index 8ba98f13f6..e3f27962ba 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc @@ -115,11 +115,6 @@ int ConvolutionTrainRun(void *cdata, int task_id) { } int ConvolutionTrainCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "ConvolutionTrainCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } int error_code = ParallelLaunch(this->context_->thread_pool_, ConvolutionTrainRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv train function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc index 4606551c1e..c72306b009 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc @@ -117,11 +117,6 @@ int ConvolutionGradFilterRun(void *cdata, int task_id) { } int ConvolutionGradFilterCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << 
"ConvolutionGradFilterCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } int error_code = ParallelLaunch(this->context_->thread_pool_, ConvolutionGradFilterRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv filter function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc index f7b53662a6..e7bbd1ce5b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc @@ -115,12 +115,6 @@ int ConvolutionGradInputRun(void *cdata, int task_id) { } int ConvolutionGradInputCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "ConvolutionGradInputCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, ConvolutionGradInputRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc index 6c7a7887d9..0133ffb0f4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc @@ -113,12 +113,6 @@ int DeConvolutionGradFilterRun(void *cdata, int task_id) { } int DeConvolutionGradFilterCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, DeConvolutionGradFilterRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "conv filter function error error_code[" << 
error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc index a8e900dc18..9c1727d917 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc @@ -88,12 +88,6 @@ int PoolingGradImpl(void *cdata, int task_id) { } int PoolingGradCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "PoolingGradCPUKernel Prepare fail!ret: " << prepare_ret; - return prepare_ret; - } - // clear output buffer before parallel run PoolingParameter *pooling_param = reinterpret_cast(op_parameter_); auto output_ptr = reinterpret_cast(out_tensors_.at(0)->MutableData()); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc index 02d0f0b796..c8650bc47f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc @@ -69,11 +69,6 @@ int PowerGradRun(void *cdata, int task_id) { } int PowerGradCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "PowerGradCPUKernel Prepare failed."; - return RET_ERROR; - } int error_code = ParallelLaunch(this->context_->thread_pool_, PowerGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "power grad function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc index 9e0333bed6..01897195cc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc @@ -65,12 +65,6 @@ int SgdRun(void *cdata, int task_id) { } int SgdCPUKernel::Run() { - auto prepare_ret = Prepare(); - if (prepare_ret != RET_OK) { - MS_LOG(ERROR) << "SgdCPUKernel Prepare 
fail!ret: " << prepare_ret; - return prepare_ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, SgdRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SGD function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc index 45458f7e76..6a8d7c55a9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc @@ -91,12 +91,6 @@ int SoftmaxCrossEntropyWithLogitsRun(void *cdata, int task_id) { } int SoftmaxCrossEntropyWithLogitsCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "SoftmaxCrossEntropyWithLogitsCPUKernel Prepare failed."; - return ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, SoftmaxCrossEntropyWithLogitsRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SoftmaxCrossEntropy function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc index 630066fa60..a4a03b04e0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc @@ -79,12 +79,6 @@ int SoftmaxGradRun(void *cdata, int task_id) { } int SoftmaxGradCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "SoftmaxGradCPUKernel Prepare failed."; - return ret; - } - int error_code = ParallelLaunch(this->context_->thread_pool_, SoftmaxGradRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SoftmaxGradRun function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc index 2c533efcb2..4e29045ef2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc @@ -118,11 +118,6 @@ int SparseSoftmaxCrossEntropyRun(void *cdata, int task_id) { } int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "SparseSoftmaxCrossEntropyWithLogitsCPUKernel Prepare failed."; - return ret; - } int error_code = ParallelLaunch(this->context_->thread_pool_, SparseSoftmaxCrossEntropyRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "SparseSoftmaxCrossEntropy function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc index c109db82db..29a7be3d6a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc @@ -63,11 +63,6 @@ int TupleRun(void *cdata, int task_id) { } int TupleGetItemCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "TupleGetItemCPUKernel Prepare failed."; - return RET_ERROR; - } int error_code = ParallelLaunch(this->context_->thread_pool_, TupleRun, this, 1); if (error_code != RET_OK) { MS_LOG(ERROR) << "tuple function error error_code[" << error_code << "]"; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc index b4d12a6b67..a485bc3d0b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc @@ -150,12 +150,7 @@ int 
ConvolutionDepthwise3x3Int8CPUKernel::InitBuffer() { } int ConvolutionDepthwise3x3Int8CPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Prepare failed."; - return RET_ERROR; - } - ret = InitBuffer(); + auto ret = InitBuffer(); if (ret != RET_OK) { MS_LOG(ERROR) << "Depthwise int8 ReSize error!"; return ret; diff --git a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc index 35c158647d..159a22345b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc @@ -100,11 +100,6 @@ void NormalizeCPUKernel::FreeBuffer() { } int NormalizeCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail! Ret error code: " << ret; - return ret; - } auto input_tensor = in_tensors_.at(0); int string_num = lite::GetStringCount(input_tensor); std::vector all_string_pack = ParseTensorBuffer(input_tensor); diff --git a/mindspore/lite/src/runtime/kernel/arm/string/predict.cc b/mindspore/lite/src/runtime/kernel/arm/string/predict.cc index 4f9b30ba40..67978604a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/predict.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/predict.cc @@ -73,11 +73,6 @@ std::vector PredictCPUKernel::GetLabelInfo() { static bool LabelInfoCmp(const LabelInfo &lhs, const LabelInfo &rhs) { return lhs.weight > rhs.weight; } int PredictCPUKernel::Run() { - auto ret = Prepare(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Prepare fail! 
Ret error code: " << ret; - return ret; - } std::vector label_info_vec = GetLabelInfo(); std::sort(label_info_vec.begin(), label_info_vec.end(), LabelInfoCmp); diff --git a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h index fbbb4eddcd..b74141bf5f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h +++ b/mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h @@ -39,6 +39,7 @@ class SubGraphOpenCLKernel : public SubGraphKernel { : SubGraphKernel(inputs, outputs, inKernels, outKernels, nodes, ctx) { ocl_runtime_ = ocl_runtime_wrap_.GetInstance(); subgraph_type_ = kGpuSubGraph; + this->name_ = "GpuSubGraph"; this->executor_ = new lite::opencl::OpenCLExecutor(); } ~SubGraphOpenCLKernel() override; diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index c203d802b1..c5a8317ed0 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -35,9 +35,6 @@ using kernel::KERNEL_ARCH::kGPU; int Scheduler::Schedule(const lite::Model *model, std::vector *tensors, std::vector *kernels) { - // 1. op ---> kernel - // 2. sub graph - // 3. 
kernels (kernels --> subGraph) int ret = InferShape(model, tensors); if (ret != RET_OK) { MS_LOG(ERROR) << "op infer shape failed."; diff --git a/mindspore/lite/src/sub_graph_kernel.h b/mindspore/lite/src/sub_graph_kernel.h index 8614d4b041..2d4607f2fe 100644 --- a/mindspore/lite/src/sub_graph_kernel.h +++ b/mindspore/lite/src/sub_graph_kernel.h @@ -68,6 +68,7 @@ class CpuFp32SubGraph : public SubGraphKernel { const std::vector &nodes, const lite::InnerContext *ctx) : SubGraphKernel(inputs, outputs, in_kernels, out_kernels, nodes, ctx) { subgraph_type_ = kCpuFP32SubGraph; + this->name_ = "CpuFP32SubGraph"; this->executor_ = new mindspore::lite::Executor; } @@ -88,6 +89,7 @@ class CpuFp16SubGraph : public SubGraphKernel { const std::vector &nodes, const lite::InnerContext *ctx) : SubGraphKernel(inputs, outputs, in_kernels, out_kernels, nodes, ctx) { subgraph_type_ = kCpuFP16SubGraph; + this->name_ = "CpuFP16SubGraph"; this->executor_ = new mindspore::lite::Executor; } diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index 2153728bea..b472b440c7 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -120,6 +120,7 @@ set(TEST_LITE_SRC ${LITE_DIR}/src/lite_session.cc ${LITE_DIR}/src/sub_graph_kernel.cc ${LITE_DIR}/src/model.cc + ${LITE_DIR}/src/model_common.cc ${LITE_DIR}/src/populate_parameter.cc ${LITE_DIR}/src/scheduler.cc ${LITE_DIR}/src/common/graph_util.cc diff --git a/mindspore/lite/tools/converter/CMakeLists.txt b/mindspore/lite/tools/converter/CMakeLists.txt index 3d627986c6..d8d1bbd4a4 100644 --- a/mindspore/lite/tools/converter/CMakeLists.txt +++ b/mindspore/lite/tools/converter/CMakeLists.txt @@ -72,6 +72,7 @@ set(LITE_SRC ${SRC_DIR}/lite_session.cc ${SRC_DIR}/executor.cc ${SRC_DIR}/model.cc + ${SRC_DIR}/model_common.cc ) if (SUPPORT_TRAIN) set(LITE_SRC