fix bug in anf_exporter and anf_importer

pull/4664/head
hangq 5 years ago
parent e5ed0105b7
commit 599b3cf240

@@ -285,8 +285,6 @@ class AbstractTensor : public AbstractUndetermined {
   AbstractBasePtr Broaden(uint8_t config = 0) const override;
   AbstractBasePtr BroadenWithShape() const;
   AbstractBasePtr Join(const AbstractBasePtr &other) final;
-  int format() const { return this->format_; }
-  void set_format(int format) { this->format_ = format; }
   bool operator==(const AbstractTensor &other) const;
   bool operator==(const AbstractBase &other) const override;
@@ -303,9 +301,6 @@ class AbstractTensor : public AbstractUndetermined {
     }
     return hash_sum;
   }
-
- protected:
-  int format_ = 0;
 };
 using AbstractTensorPtr = std::shared_ptr<AbstractTensor>;
 using AbstractTensorPtrList = std::vector<AbstractTensorPtr>;

@@ -60,6 +60,7 @@ add_compile_definitions(NO_DLIB)
 add_compile_options(-fPIC)
 if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
     #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")
+    string(REPLACE "-g" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 endif()
 if (BUILD_DEVICE)

@@ -1,37 +1,30 @@
 set(LITE_SRC
     ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/common/ms_tensor_utils.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc
    ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/runtime/workspace_pool.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/ir/tensor.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/context.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/kernel_registry.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/populate_parameter.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/scheduler.cc
-)
-
-if (SUPPORT_GPU)
-    list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc)
-    list(APPEND LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc)
-endif ()
-
-set(LITE_SRC
-    ${LITE_SRC}
-    ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
-    ${CMAKE_CURRENT_SOURCE_DIR}/model.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/lite_session.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/model.cc
 )
 if (SUPPORT_GPU)
     set(LITE_SRC
         ${LITE_SRC}
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/subgraph_opencl_kernel.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/runtime/kernel/opencl/utils.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_executor.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_allocator.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_runtime.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/runtime/opencl/opencl_wrapper.cc
     )
 endif ()

@@ -147,6 +147,7 @@ class Tensor : public mindspore::tensor::MetaTensor {
     }
     if (nullptr == allocator_) {
       free(this->data_);
+      this->data_ = nullptr;
     } else {
       allocator_->Free(this->data_);
       this->data_ = nullptr;

@@ -23,6 +23,7 @@
 #include <utility>
 #include "ir/dtype/type_id.h"
+#include "schema/inner/model_generated.h"

 namespace mindspore {
 class ParamValueLite : public Value {
@@ -50,18 +51,20 @@ class ParamValueLite : public Value {
     return size;
   }

-  bool operator==(const Value &other) const override {
-    return this == &other;
-  }
+  bool operator==(const Value &other) const override { return this == &other; }
+
+  int format() const { return this->format_; }
+  void set_format(int format) { this->format_ = format; }

  private:
-  void *tensor_addr_;
-  size_t tensor_size_;
-  std::vector<int> tensor_shape_;
-  TypeId type_id_;
+  void *tensor_addr_ = nullptr;
+  size_t tensor_size_ = 0;
+  int format_ = schema::Format_KCHW;
+  std::vector<int> tensor_shape_{};
+  TypeId type_id_ = TypeId::kNumberTypeFloat32;
 };
 using ParamValueLitePtr = std::shared_ptr<ParamValueLite>;
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_SRC_PARAM_VALUE_LITE_H_
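Note: with this change the weight layout travels with the parameter's default value instead of with AbstractTensor (see the first hunk). A trimmed-down sketch of the new interface; the Format enum values here are stand-ins for the generated ones in schema/inner/model_generated.h.

#include <cstddef>
#include <memory>
#include <vector>

enum Format { Format_KCHW = 0, Format_NCHW = 1 };  // stand-in for schema::Format

// Mirror of the patched ParamValueLite: every field has an in-class
// initializer, and the tensor layout is carried here via format_.
class ParamValueLiteSketch {
 public:
  int format() const { return format_; }
  void set_format(int format) { format_ = format; }

 private:
  void *tensor_addr_ = nullptr;
  std::size_t tensor_size_ = 0;
  int format_ = Format_KCHW;
  std::vector<int> tensor_shape_{};
};

int main() {
  auto value = std::make_shared<ParamValueLiteSketch>();
  // Importer side: param_value->set_format(tensor->format);
  value->set_format(Format_NCHW);
  // Exporter side: paramTensor->format = schema::Format(paramValue->format());
  return value->format() == Format_NCHW ? 0 : 1;
}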

@@ -35,12 +35,12 @@ void AnfExporter::RemoveIfMakeTuple(const CNodePtr &cnode) {
   inputs.emplace_back(cnode->input(0));
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    AnfNodePtr inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    AnfNodePtr input_node = cnode->input(i);
+    if (!input_node->isa<CNode>()) {
       inputs.emplace_back(cnode->input(i));
       continue;
     }
-    auto make_tuple_node = utils::cast<CNodePtr>(inputNode);
+    auto make_tuple_node = utils::cast<CNodePtr>(input_node);
     if (IsPrimitiveCNode(make_tuple_node, schema::PrimitiveType_MakeTuple)) {
       has_make_tuple = true;
       for (size_t j = 1; j < make_tuple_node->inputs().size(); ++j) {
@@ -62,12 +62,12 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
   inputs.clear();
   inputs.emplace_back(cnode->input(0));
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    AnfNodePtr inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    AnfNodePtr input_node = cnode->input(i);
+    if (!input_node->isa<CNode>()) {
       inputs.emplace_back(cnode->input(i));
       continue;
     }
-    auto tuple_get_item_node = utils::cast<CNodePtr>(inputNode);
+    auto tuple_get_item_node = utils::cast<CNodePtr>(input_node);
     if (IsPrimitiveCNode(tuple_get_item_node, schema::PrimitiveType_TupleGetItem)) {
       has_tuple_get_item = true;
       inputs.emplace_back(tuple_get_item_node->input(1));
@@ -76,7 +76,7 @@ bool AnfExporter::RemoveIfTupleGetItem(const CNodePtr &cnode) {
         MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
         return false;
       }
-      ValueNodePtr value_node = utils::cast<ValueNodePtr>(indexNode);
+      auto value_node = utils::cast<ValueNodePtr>(indexNode);
       map_remove_get_item_[tuple_get_item_node->input(1)->fullname_with_scope()] = GetValue<int>(value_node->value());
     } else {
       inputs.emplace_back(cnode->input(i));
@@ -92,15 +92,20 @@ bool AnfExporter::AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const CNodePtr &cnode) {
   MS_ASSERT(meta_graphT != nullptr);
   MS_ASSERT(cnode != nullptr);
   for (size_t i = 1; i < cnode->inputs().size(); ++i) {
-    auto inputNode = cnode->input(i);
-    if (!inputNode->isa<CNode>()) {
+    auto input_anode = cnode->input(i);
+    if (!input_anode->isa<CNode>()) {
       MS_LOG(ERROR) << "Node of Return's input is not CNode";
       return false;
     }
-    auto inputCNode = utils::cast<CNodePtr>(inputNode);
-    std::string inputName = inputNode->fullname_with_scope();
-    auto graphOutput = node_id_map_[inputName];
-    meta_graphT->outputIndex.emplace_back(graphOutput);
+    auto input_cnode = utils::cast<CNodePtr>(input_anode);
+    std::string input_name = input_anode->fullname_with_scope();
+    auto iter = node_id_map_.find(input_name);
+    if (iter == node_id_map_.end()) {
+      MS_LOG(ERROR) << "Could not find output node";
+      return false;
+    }
+    auto graph_output = iter->second;
+    meta_graphT->outputIndex.emplace_back(graph_output);
   }
   return true;
 }
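Note: the switch from node_id_map_[inputName] to find() is the substantive fix in this hunk. A short illustration of why (map contents are illustrative): operator[] on std::map silently inserts a zero-initialized entry for a missing key, turning a failed lookup into a bogus output index 0.

#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> node_id_map{{"conv1", 3}};

  // node_id_map["missing_node"] would create the entry and yield 0;
  // find() surfaces the failure instead.
  auto iter = node_id_map.find("missing_node");
  if (iter == node_id_map.end()) {
    std::cerr << "Could not find output node" << std::endl;
    return 1;
  }
  std::cout << iter->second << std::endl;
  return 0;
}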
@@ -198,10 +203,10 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
     }
     map_remove_get_item_.clear();
     RemoveIfMakeTuple(cnode);
-    if (!RemoveIfTupleGetItem(cnode)) {
-      MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
-      return nullptr;
-    }
+    // if (!RemoveIfTupleGetItem(cnode)) {
+    //   MS_LOG(ERROR) << "RemoveIfTupleGetItem failed";
+    //   return nullptr;
+    // }

     if (primT->value.type == schema::PrimitiveType_Return) {
       AddOutPutIfReturn(meta_graphT, cnode);
@@ -234,33 +239,54 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph) {
   return meta_graphT.release();
 }

-void AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
+int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode) {
   std::string input_name = input_anode->fullname_with_scope();
-  if (!map_remove_get_item_.empty()) {
-    for (auto name : map_remove_get_item_) {
-      if (name.first == input_name) {
-        input_name = input_name + "_o:" + std::to_string(name.second);
-      }
-    }
-  }
-  if (node_id_map_.find(input_name) != node_id_map_.end()) {
-    output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
-  }
+  auto input_cnode = utils::cast<CNodePtr>(input_anode);
+  if (!IsPrimitiveCNode(input_cnode, schema::PrimitiveType_TupleGetItem)) {
+    if (node_id_map_.find(input_name) != node_id_map_.end()) {
+      output_cnode->inputIndex.emplace_back(node_id_map_[input_name]);
+    }
+  } else {
+    auto inputs = input_cnode->inputs();
+    if (inputs.size() != 3) {
+      MS_LOG(ERROR) << "TupleGetItem should have 3 inputs, got " << inputs.size();
+      return RET_ERROR;
+    }
+    auto get_item_input_cnode = inputs.at(1);
+    auto index_vnode = inputs.at(2);
+    if (!utils::isa<ValueNode>(index_vnode)) {
+      MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode";
+      return RET_ERROR;
+    }
+    auto value_node = utils::cast<ValueNodePtr>(index_vnode);
+    if (value_node == nullptr) {
+      MS_LOG(ERROR) << "cast to ValueNode failed";
+      return RET_ERROR;
+    }
+    auto input_index_key =
+      get_item_input_cnode->fullname_with_scope() + "_o:" + std::to_string(GetValue<int>(value_node->value()));
+    auto iter = node_id_map_.find(input_index_key);
+    if (iter == node_id_map_.end()) {
+      MS_LOG(ERROR) << "Can not find get_item output tensor";
+      return RET_ERROR;
+    }
+    output_cnode->inputIndex.emplace_back(iter->second);
+  }
+  return RET_OK;
 }
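Note: a sketch of the tensor-id naming convention the new ConvertInputCNode relies on (node names and ids are illustrative): a multi-output node registers each output in node_id_map_ under "<node>_o:<index>", and a TupleGetItem consumer rebuilds that key from its index operand instead of consulting the old map_remove_get_item_ side table.

#include <iostream>
#include <map>
#include <string>

int main() {
  // node_id_map_ after exporting a two-output node "split1".
  std::map<std::string, int> node_id_map{{"split1_o:0", 7}, {"split1_o:1", 8}};

  // A TupleGetItem(split1, 1) consumer rebuilds the key from its operands:
  std::string producer = "split1";  // input(1)->fullname_with_scope()
  int index = 1;                    // GetValue<int>(value_node->value())
  std::string key = producer + "_o:" + std::to_string(index);

  auto iter = node_id_map.find(key);
  if (iter == node_id_map.end()) {
    std::cerr << "Can not find get_item output tensor" << std::endl;
    return 1;
  }
  std::cout << "input tensor id: " << iter->second << std::endl;  // prints 8
  return 0;
}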
-int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
+int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
                                        const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
                                        schema::CNodeT *output_cnode) {
-  std::string input_name = input_anode->fullname_with_scope();
   auto paramNode = input_anode->cast<ParameterPtr>();
-  if (paramNode->name().empty()) {
-    paramNode->set_name(input_name + "_i:" + std::to_string(anode_index - 1));
-  }
-  if (node_id_map_.find(paramNode->name()) != node_id_map_.end()) {
+  std::string input_name = paramNode->fullname_with_scope();
+  if (node_id_map_.find(input_name) != node_id_map_.end()) {
     output_cnode->inputIndex.emplace_back(node_id_map_[paramNode->name()]);
     return RET_OK;
   }
   auto paramTensor = std::make_unique<schema::TensorT>();
+  paramTensor->nodeType = schema::NodeType_ValueNode;
+  paramTensor->format = schema::Format_NHWC;
   auto abstractBase = paramNode->abstract();
   if (abstractBase == nullptr) {
     MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << paramNode->name();
@@ -274,7 +300,6 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
   auto typePtr = abstractTensor->element()->GetTypeTrack();
   MS_ASSERT(typePtr != nullptr);
   paramTensor->dataType = typePtr->type_id();
-  paramTensor->format = schema::Format(abstractTensor->format());
   if (!utils::isa<abstract::ShapePtr>(abstractTensor->BuildShape())) {
     MS_LOG(ERROR) << "Shape of Abstract of parameter should be ShapePtr, " << paramNode->name();
     return RET_ERROR;
@@ -282,11 +307,11 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
   paramTensor->dims = utils::cast<abstract::ShapePtr>(abstractTensor->BuildShape())->shape();
   auto paramValue = std::dynamic_pointer_cast<ParamValueLite>(paramNode->default_param());
   if (paramValue != nullptr) {
-    paramTensor->nodeType = schema::NodeType_ValueNode;
     paramTensor->data.resize(paramValue->tensor_size());
+    paramTensor->format = schema::Format(paramValue->format());
     memcpy(paramTensor->data.data(), paramValue->tensor_addr(), paramValue->tensor_size());
   }
-  node_id_map_[paramNode->fullname_with_scope()] = meta_graphT->allTensors.size();
+  node_id_map_[input_name] = meta_graphT->allTensors.size();
   output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
   meta_graphT->allTensors.emplace_back(std::move(paramTensor));
   return RET_OK;
@@ -345,9 +370,13 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
     auto input_node = cnode->input(i);
     if (input_node->isa<CNode>()) {
       is_graph_input = false;
-      ConvertInputCNode(input_node, fb_node);
+      auto ret = ConvertInputCNode(input_node, fb_node);
+      if (ret != RET_OK) {
+        MS_LOG(ERROR) << "ConvertInputCNode failed";
+        return RET_ERROR;
+      }
     } else if (input_node->isa<Parameter>()) {
-      auto ret = ConvertInputParameter(input_node, i, meta_graphT, fb_node);
+      auto ret = ConvertInputParameter(input_node, meta_graphT, fb_node);
       if (ret != RET_OK) {
         MS_LOG(ERROR) << "ConvertInputParameter failed";
         return RET_ERROR;

@@ -1,7 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -42,8 +40,8 @@ class AnfExporter {
   bool AddOutPutIfReturn(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const CNodePtr &cnode);

  protected:
-  void ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
-  int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode, size_t anode_index,
+  int ConvertInputCNode(const std::shared_ptr<AnfNode> input_anode, schema::CNodeT *output_cnode);
+  int ConvertInputParameter(const std::shared_ptr<AnfNode> input_anode,
                             const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
   int ConvertInputValueNode(std::shared_ptr<AnfNode> input_anode,
                             const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);

@@ -1,6 +1,4 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
  * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,6 +1,4 @@
 /**
- * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
- *
  * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,8 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia
- * (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,14 +19,12 @@
 #include <vector>
 #include <memory>
 #include "tools/anf_importer/anf_populater/anf_node_populater_registry.h"
-#include "ir/func_graph.h"
-#include "src/ir/tensor.h"
 #include "tools/converter/quantizer/quantize_util.h"

 namespace mindspore::lite {
 void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
-                                                 const std::unique_ptr<schema::PrimitiveT> &primitive,
-                                                 const int &group, const std::vector<AnfNodePtr> &inputs) {
+                                                 const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group,
+                                                 const std::vector<AnfNodePtr> &inputs) {
   auto attr = std::make_unique<schema::DepthwiseConv2DT>();
   auto format = GetValue<std::string>(prim->GetAttr("data_format"));
   if (format == "NCHW") {
@@ -73,19 +68,11 @@ void AnfConvPopulater::PopulaterConv2DMultiGroup(const PrimitivePtr &prim,
   attr->channelMultiplier = channel_mutiplier;

   MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
-  auto inputNode = inputs[kAnfPopulaterOne];
-  MS_ASSERT(inputNode != nullptr);
-  if (inputNode->isa<Parameter>()) {
-    auto paramNode = inputNode->cast<ParameterPtr>();
-    auto abstractBase = paramNode->abstract();
-    MS_ASSERT(abstractBase != nullptr);
-    if (utils::isa<abstract::AbstractTensorPtr>(abstractBase)) {
-      auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase);
-      MS_ASSERT(abstractTensor != nullptr);
-      if (abstractTensor->format() == schema::Format_NCHW) {
-        abstractTensor->set_format(schema::Format_KCHW);
-      }
-    }
+  auto input_node = inputs[kAnfPopulaterOne];
+  MS_ASSERT(input_node != nullptr);
+  if (input_node->isa<Parameter>()) {
+    auto param_node = input_node->cast<ParameterPtr>();
+    ConvertConvWeight<float>(param_node);
   }

   primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
@@ -144,10 +131,9 @@ void AnfConvPopulater::CalQuantParam(const double &mean, const double &stdDev, float *mMin,
   *mMax = static_cast<float>((qmax - mean) / stdDev);
 }

-void AnfConvPopulater::PopulaterQuantParam(
-    const PrimitivePtr &prim,
-    std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
-    std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
+void AnfConvPopulater::PopulaterQuantParam(const PrimitivePtr &prim,
+                                           std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
+                                           std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam) {
   auto narrow_range = prim->GetAttr("narrow_range");
   bool narrowRangeQuantParam = GetValue<bool>(narrow_range);
   auto num_bits = prim->GetAttr("num_bits");
@@ -206,8 +192,7 @@ void AnfConvPopulater::PopulaterQuantParam(
       quantParam.max = 0.0;
       quantParam.zeroPoint = 0;
-      quantParam.scale =
-        vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
+      quantParam.scale = vecInputQuantParam->at(0).at(0).scale * vecInputQuantParam->at(1).at(i).scale;
       quants.emplace_back(quantParam);
     }
     vecInputQuantParam->emplace_back(quants);

@@ -1,8 +1,5 @@
 /**
- * This is the C++ adaptation and derivative work of Myia
- * (https://github.com/mila-iqia/myia/).
- *
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,9 +17,13 @@
 #ifndef MINDSPORE_ANF_CONV_PARSER_H
 #define MINDSPORE_ANF_CONV_PARSER_H

-#include "tools/anf_importer/anf_populater/anf_node_populater.h"
 #include <memory>
 #include <vector>
+#include "tools/anf_importer/anf_populater/anf_node_populater.h"
+#include "base/base_ref.h"
+#include "abstract/abstract_value.h"
+#include "src/param_value_lite.h"
+#include "src/ir/tensor.h"

 namespace mindspore::lite {
 class AnfConvPopulater : public AnfNodePopulater {
@@ -33,18 +34,65 @@ class AnfConvPopulater : public AnfNodePopulater {
                 const std::vector<AnfNodePtr> &inputs) override;

  private:
-  void PopulaterConv2DMultiGroup(
-      const PrimitivePtr &prim,
-      const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group, const std::vector<AnfNodePtr> &inputs);
-  void PopulaterConv2DSingleGroup(
-      const PrimitivePtr &prim,
-      const std::unique_ptr<schema::PrimitiveT> &primitive, const int &group);
-  void PopulaterQuantParam(
-      const PrimitivePtr &prim,
-      std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
-      std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
-  void CalQuantParam(const double &mean, const double &stdDev, float *mMin,
-                     float *mMax);
+  template <typename T>
+  void ConvertConvWeight(const ParameterPtr &param_node) {
+    MS_ASSERT(param_node != nullptr);
+    auto param = param_node->default_param();
+    auto weight = std::dynamic_pointer_cast<ParamValueLite>(param);
+    MS_ASSERT(weight != nullptr);
+
+    std::unique_ptr<T> buf(new (std::nothrow) T[weight->tensor_shape_size()]);
+    if (buf == nullptr) {
+      MS_LOG(ERROR) << "new buf failed";
+      return;
+    }
+
+    size_t filter_k = weight->tensor_shape()[0];
+    size_t filter_c = weight->tensor_shape()[1];
+    size_t filter_h = weight->tensor_shape()[2];
+    size_t filter_w = weight->tensor_shape()[3];
+    T *p1Buff = nullptr;
+    T *p2Buff = nullptr;
+    for (size_t k = 0; k < filter_k; ++k) {
+      for (size_t c = 0; c < filter_c; ++c) {
+        for (size_t h = 0; h < filter_h; ++h) {
+          for (size_t w = 0; w < filter_w; ++w) {
+            p1Buff = reinterpret_cast<float *>(weight->tensor_addr()) +
+                     ((k * filter_c * filter_h * filter_w) + (c * filter_h * filter_w) + (h * filter_w) + (w));
+            p2Buff =
+              buf.get() + ((c * filter_k * filter_h * filter_w) + (k * filter_h * filter_w) + (h * filter_w) + (w));
+            *p2Buff = *p1Buff;
+          }
+        }
+      }
+    }
+
+    auto ret = ::memcpy_s(weight->tensor_addr(), weight->tensor_shape_size() * sizeof(T), buf.get(),
+                          weight->tensor_shape_size() * sizeof(T));
+    if (ret != EOK) {
+      MS_LOG(ERROR) << "memcpy_s failed: " << ret;
+      return;
+    }
+
+    auto abstract_base = param_node->abstract();
+    MS_ASSERT(abstract_base != nullptr);
+    if (utils::isa<abstract::AbstractTensorPtr>(abstract_base)) {
+      auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[0] = filter_c;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[1] = filter_k;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[2] = filter_h;
+      utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[3] = filter_w;
+    }
+    return;
+  }
+
+  void PopulaterConv2DMultiGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
+                                 const int &group, const std::vector<AnfNodePtr> &inputs);
+  void PopulaterConv2DSingleGroup(const PrimitivePtr &prim, const std::unique_ptr<schema::PrimitiveT> &primitive,
+                                  const int &group);
+  void PopulaterQuantParam(const PrimitivePtr &prim, std::vector<std::vector<schema::QuantParamT>> *vecInputQuantParam,
+                           std::vector<std::vector<schema::QuantParamT>> *vecOutputQuantParam);
+  void CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax);
 };
 }  // namespace mindspore::lite

@@ -42,14 +42,14 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
     auto type_id = static_cast<TypeId>(tensor->dataType);
     auto type_ptr = TypeIdToType(type_id);
     auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape);
-    abstract_tensor->set_format(tensor->format);
     parameter->set_abstract(abstract_tensor);
-    parameter->set_name("const_" + std::to_string(i));
+    parameter->set_name("const_" + std::to_string(i) + "_parameter");

     ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
     MS_ASSERT(param_value != nullptr);
     param_value->set_tensor_shape(shape);
     param_value->set_tensor_type(type_id);
+    param_value->set_format(tensor->format);
     if (!tensor->data.empty()) {
       auto size = tensor->data.size();
       char *tensor_data = new (std::nothrow) char[size];
@@ -138,6 +138,7 @@ void AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode,
       auto get_item_value = NewValueNode(MakeValue<int>(i));
       std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
       CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
+      get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i));
       AddNode(out_tensor_id, get_item_cnode);
     }
     dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list));
@@ -170,27 +171,41 @@ int AnfImporterFromMetaGraphT::ConverterCNode() {
 int AnfImporterFromMetaGraphT::AddReturnCNode() {
   MS_EXCEPTION_IF_NULL(meta_graph_);
   MS_EXCEPTION_IF_NULL(func_graph_);
-  std::vector<AnfNodePtr> make_tuple_inputs;
-  auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
-  make_tuple_inputs.emplace_back(make_tuple_prim);
-  for (auto tensor_id : meta_graph_->outputIndex) {
-    auto cNode = GetNode(tensor_id);
-    if (nullptr == cNode) {
-      MS_LOG(ERROR) << "Can't find input node.";
-      return RET_ERROR;
-    }
-    make_tuple_inputs.emplace_back(cNode);
-  }
-  auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
-  make_tuple_cnode->set_fullname_with_scope("return tuple");
-
-  std::vector<AnfNodePtr> op_inputs;
-  auto value_node = NewValueNode(GetReturnPrim());
-  op_inputs.emplace_back(value_node);
-  op_inputs.emplace_back(make_tuple_cnode);
-  auto cnode = func_graph_->NewCNode(op_inputs);
-  cnode->set_fullname_with_scope("return");
-  func_graph_->set_return(cnode);
+  if (meta_graph_->outputIndex.size() > 1) {
+    std::vector<AnfNodePtr> make_tuple_inputs;
+    auto make_tuple_prim = NewValueNode(GetMakeTuplePrim());
+    make_tuple_inputs.emplace_back(make_tuple_prim);
+    for (auto tensor_id : meta_graph_->outputIndex) {
+      auto cNode = GetNode(tensor_id);
+      if (nullptr == cNode) {
+        MS_LOG(ERROR) << "Can't find input node.";
+        return RET_ERROR;
+      }
+      make_tuple_inputs.emplace_back(cNode);
+    }
+    auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
+    make_tuple_cnode->set_fullname_with_scope("return tuple");
+
+    std::vector<AnfNodePtr> op_inputs;
+    auto value_node = NewValueNode(GetReturnPrim());
+    op_inputs.emplace_back(value_node);
+    op_inputs.emplace_back(make_tuple_cnode);
+    auto cnode = func_graph_->NewCNode(op_inputs);
+    cnode->set_fullname_with_scope("return");
+    func_graph_->set_return(cnode);
+  } else {
+    auto value_node = NewValueNode(GetReturnPrim());
+    std::vector<AnfNodePtr> op_inputs{value_node};
+    auto cnode = GetNode(meta_graph_->outputIndex.front());
+    if (nullptr == cnode) {
+      MS_LOG(ERROR) << "Can't find input node.";
+      return RET_ERROR;
+    }
+    op_inputs.emplace_back(cnode);
+    auto return_cnode = func_graph_->NewCNode(op_inputs);
+    return_cnode->set_fullname_with_scope("return");
+    func_graph_->set_return(return_cnode);
+  }
   return RET_OK;
 }
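Note: the importer now emits two shapes of graph tail. A rough model of the branching (plain strings stand in for the real ANF node classes): multi-output graphs wrap their outputs in MakeTuple before Return; single-output graphs feed the node to Return directly.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Sketch of what AddReturnCNode builds for each case.
std::string BuildReturn(const std::vector<std::string> &outputs) {
  if (outputs.size() > 1) {
    std::string tuple = "MakeTuple(";
    for (std::size_t i = 0; i < outputs.size(); ++i) {
      if (i > 0) tuple += ", ";
      tuple += outputs[i];
    }
    tuple += ")";
    return "Return(" + tuple + ")";  // multi-output: MakeTuple wrapper first
  }
  return "Return(" + outputs.front() + ")";  // single output: no tuple wrapper
}

int main() {
  std::cout << BuildReturn({"conv1"}) << std::endl;         // Return(conv1)
  std::cout << BuildReturn({"out0", "out1"}) << std::endl;  // Return(MakeTuple(out0, out1))
  return 0;
}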

@@ -167,9 +167,6 @@ STATUS WeightFormatHardCodePass::HardCodeMS(const std::unique_ptr<CNodeT> &node,
   if (opType == PrimitiveType_Conv2D) {
     weightTensor->format = Format_KCHW;
   } else if (opType == PrimitiveType_DepthwiseConv2D) {
-    if (weightTensor->format == Format_KCHW) {
-      TransFilterFormat<float>(weightTensor.get(), kKCHW2CKHW);
-    }
     weightTensor->format = Format_CKHW;
   } else {
     MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
