fix bugs in mindir models, add ci test for one mindir model

pull/8783/head
wang_shaocong 4 years ago
parent 4aa84372f5
commit d33376bdf4

@@ -22,8 +22,35 @@
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
#ifdef PRIMITIVE_WRITEABLE
int Floor::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
if (this->primitive_ == nullptr) {
this->primitive_ = new (std::nothrow) schema::PrimitiveT;
if (this->primitive_ == nullptr) {
MS_LOG(ERROR) << "new primitiveT failed";
return RET_ERROR;
}
this->primitive_->value.type = schema::PrimitiveType_Floor;
}
if (this->primitive_->value.type != schema::PrimitiveType_Floor) {
MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
delete this->primitive_;
this->primitive_ = nullptr;
return RET_ERROR;
}
if (this->primitive_->value.value == nullptr) {
auto attr = new (std::nothrow) schema::FloorT();
if (attr == nullptr) {
delete this->primitive_;
this->primitive_ = nullptr;
MS_LOG(ERROR) << "new primitiveT value failed";
return RET_ERROR;
}
this->primitive_->value.value = attr;
}
return RET_OK;
}
#else
int Floor::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
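The guard here is flipped from #ifndef to #ifdef PRIMITIVE_WRITEABLE, so the new UnPackAttr lands in the writable build while UnPackToFlatBuilder stays in the #else branch. The added UnPackAttr follows the same boilerplate the other attribute-free ops use: allocate the schema::PrimitiveT holder if it does not exist yet, pin its type to the op, then allocate the op's (empty) attribute struct. A condensed standalone sketch of that flow, using simplified stand-in types rather than the generated flatbuffer schema:

// Condensed sketch of the UnPackAttr boilerplate above (simplified stand-in
// types, not the generated schema).
#include <iostream>
#include <memory>

enum class PrimitiveType { Unknown, Floor };
struct FloorAttr {};                 // stand-in for schema::FloorT
struct PrimitiveT {
  PrimitiveType type = PrimitiveType::Unknown;
  std::unique_ptr<FloorAttr> value;  // stand-in for the value union
};

int UnPackFloorAttr(std::unique_ptr<PrimitiveT> &primitive) {
  if (primitive == nullptr) {
    primitive = std::make_unique<PrimitiveT>();
    primitive->type = PrimitiveType::Floor;
  }
  if (primitive->type != PrimitiveType::Floor) {
    std::cerr << "unexpected primitive type\n";
    primitive.reset();
    return -1;  // RET_ERROR
  }
  if (primitive->value == nullptr) {
    primitive->value = std::make_unique<FloorAttr>();
  }
  return 0;  // RET_OK
}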

@@ -31,6 +31,7 @@ class Floor : public ArithmeticSelf {
#ifdef PRIMITIVE_WRITEABLE
MS_DECLARE_PARENT(Floor, ArithmeticSelf);
explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs);
#else
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif

@@ -219,12 +219,6 @@ void PrimitiveC::CalFloatScopeByMeanAndStddev(const double &mean, const double &
void PrimitiveC::FillDefaultInputQuantParamIfNeed(const size_t &inputSize) {
std::vector<schema::QuantParamT> quants;
schema::QuantParamT quantParam;
// fill input_quant_param_ with uninitialized default quant params
if (input_quant_param_.size() < inputSize) {
schema::QuantParamT tmpQuantParam;
quants.emplace_back(tmpQuantParam);
input_quant_param_.insert(input_quant_param_.end(), inputSize - input_quant_param_.size(), quants);
}
if (input_quant_param_.size() == kDoubleNum) {
quants.clear();
@@ -235,6 +229,12 @@ void PrimitiveC::FillDefaultInputQuantParamIfNeed(const size_t &inputSize) {
quants.emplace_back(quantParam);
input_quant_param_.emplace_back(quants);
}
// fill input_quant_param_ with uninitialized default quant params
if (input_quant_param_.size() < inputSize) {
schema::QuantParamT tmpQuantParam;
quants.emplace_back(tmpQuantParam);
input_quant_param_.insert(input_quant_param_.end(), inputSize - input_quant_param_.size(), quants);
}
}
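The default-filling block is moved below the two-entry (kDoubleNum) handling, presumably so that handling runs on the quant params that were actually populated rather than on freshly padded defaults. A minimal standalone sketch of the corrected order, with a simplified stand-in for schema::QuantParamT:

// Simplified sketch of the corrected fill order: handle the exact two-entry
// case first, then pad the remaining inputs with uninitialized defaults.
#include <cstddef>
#include <vector>

struct QuantParam {  // stand-in for schema::QuantParamT
  double scale = 1.0;
  int zero_point = 0;
  bool inited = false;
};
constexpr size_t kDoubleNum = 2;

void FillDefaultInputQuantParamIfNeed(std::vector<std::vector<QuantParam>> *input_quant_param,
                                      size_t input_size) {
  if (input_quant_param->size() == kDoubleNum) {
    // derive the second entry's params from the first (details elided)
  }
  // pad only after the special case above, so padding can never be what
  // pushes the size up to kDoubleNum
  while (input_quant_param->size() < input_size) {
    input_quant_param->push_back({QuantParam{}});
  }
}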
void PrimitiveC::PopulaterInputQuantParam(const Primitive &prim, const std::vector<AnfNodePtr> &inputs,
@@ -574,6 +574,10 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
return NewPrimitiveC<Resize>(prim, inputs, quantType);
} else if (op_type == "ResizeBilinear") {
return NewPrimitiveC<Resize>(prim, inputs, quantType);
} else if (op_type == "Floor") {
return NewPrimitiveC<Floor>(prim, inputs, quantType);
} else if (op_type == "Minimum") {
return NewPrimitiveC<Minimum>(prim, inputs, quantType);
#ifdef SUPPORT_TRAIN
} else if (op_type == "SoftmaxCrossEntropyWithLogits") {

@@ -9,3 +9,4 @@ mobilefacenet_iod.mindir
effnet_iod.mindir
resnext50.mindir
ocr_mobilenetV2.mindir
mindspore_ghostnet_ssd_13x.mindir

@@ -791,13 +791,13 @@ int AnfImporterFromProtobuf::ImportNodesForGraph(const FuncGraphPtr &outputFuncG
cnode_ptr = BuildCNodeForFuncGraph(outputFuncGraph, node_proto, quantType);
if (cnode_ptr == nullptr) {
MS_LOG(ERROR) << "Build CNode for funcgraph fail at index: : " << i;
status = (status == RET_OK ? RET_NULL_PTR : status);
return RET_ERROR;
}
auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode_ptr->input(0));
if (primitive_c == nullptr) {
MS_LOG(ERROR) << "primitive_c is nullptr";
status = RET_ERROR;
return RET_ERROR;
}
#ifdef SUPPORT_TRAIN

@@ -55,12 +55,15 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph, const conver
MS_LOG(ERROR) << "config shoud be specified";
return nullptr;
}
// fusion const_fold
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto pm = std::make_shared<opt::PassManager>("anf fusion pass manager", false);
auto graph_pm = std::make_shared<opt::PassManager>("anf graph pass manager", true);
auto convert_pm = std::make_shared<opt::PassManager>("anf graph convert pass manager", true);
// fusion const_fold
auto cf_pm = std::make_shared<opt::PassManager>("constant folding pass manager", false);
cf_pm->AddPass(std::make_shared<opt::ConstFoldPass>());
// for now - training does not support fusing operations
if (config != nullptr && !config->trainModel) {
// remove quantdtype when awaretraining
@@ -114,9 +117,9 @@ FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph, const conver
remove_unused_transpose_pass->SetFmkType(config->fmk);
pm->AddPass(remove_unused_transpose_pass);
}
pm->AddPass(std::make_shared<opt::ConstFoldPass>());
pm->AddPass(std::make_shared<opt::ConvConvFusion>());
convert_pm->AddPass(std::make_shared<opt::ClipConvertActivationPass>());
optimizer->AddPassManager(cf_pm);
optimizer->AddPassManager(convert_pm);
optimizer->AddPassManager(pm);
optimizer->AddPassManager(graph_pm);
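Constant folding now gets its own pass manager, registered ahead of the convert, fusion, and graph pass managers, so every later pass sees an already-folded graph (the weight-format change further down leans on this: its comment notes the weight format may already have been set to NHWC by const folding). A toy illustration of that ordering guarantee, not the real opt::GraphOptimizer API:

// Toy illustration: pass managers run strictly in registration order, so
// putting constant folding first means all later passes see folded constants.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Graph {};  // placeholder for the ANF graph

int main() {
  std::vector<std::pair<std::string, std::function<void(Graph &)>>> managers = {
      {"constant folding pass manager", [](Graph &) { /* ConstFoldPass */ }},
      {"anf graph convert pass manager", [](Graph &) { /* ClipConvertActivationPass */ }},
      {"anf fusion pass manager", [](Graph &) { /* ConvConvFusion, ... */ }},
      {"anf graph pass manager", [](Graph &) { /* graph-level passes */ }},
  };
  Graph graph;
  for (auto &pm : managers) {
    std::cout << "running " << pm.first << "\n";
    pm.second(graph);
  }
  return 0;
}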

@@ -833,7 +833,7 @@ STATUS PostTrainingQuantizer::QuantNode() {
MS_LOG(WARNING) << "index value node is null";
continue;
}
size_t index = GetValue<int>(index_value_node->value());
size_t index = CastToInt(index_value_node->value(), false).front();
auto input_node = cnode->input(1);
MS_ASSERT(input_node != nullptr);
auto input_cnode = std::dynamic_pointer_cast<mindspore::CNode>(input_node);
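GetValue<int> is replaced with lite::CastToInt(..., false).front(), which, judging by the call sites in this commit, hands the value back as a vector of 32-bit ints whether the exported MindIR stored the index as int32 or int64. A simplified scalar-only analogue of that idea, with a hypothetical value type rather than MindSpore's ValuePtr:

// Simplified analogue of the CastToInt idea: accept either integer width and
// always return 32-bit values, so callers can stay on int/size_t.
#include <cstdint>
#include <variant>
#include <vector>

using ScalarValue = std::variant<int32_t, int64_t>;

std::vector<int> CastToInt(const ScalarValue &value) {
  if (std::holds_alternative<int64_t>(value)) {
    return {static_cast<int>(std::get<int64_t>(value))};
  }
  return {std::get<int32_t>(value)};
}

// usage mirroring the quantizer: size_t index = CastToInt(v).front();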

@@ -510,7 +510,7 @@ bool CheckIsAllInputsParam(const AnfNodePtr &node) {
if (utils::isa<CNode>(node)) {
auto cnode = node->cast<CNodePtr>();
for (size_t i = 1; i < cnode->inputs().size(); i++) {
if (!utils::isa<Parameter>(cnode->input(i))) {
if (!utils::isa<Parameter>(cnode->input(i)) && !utils::isa<ValueNodePtr>(cnode->input(i))) {
return false;
}
}
@@ -589,7 +589,7 @@ size_t GetTupleGetItemOutIndex(const CNodePtr &tuple_get_item) {
MS_ASSERT(output_index_value_node != nullptr);
auto value_node = output_index_value_node->cast<ValueNodePtr>();
MS_ASSERT(value_node != nullptr);
return IntToSize(GetValue<int>(value_node->value()));
return IntToSize(lite::CastToInt(value_node->value(), false).front());
}
std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOutputIdx(const FuncGraphPtr &graph,
const AnfNodePtr &node,

@@ -121,7 +121,12 @@ lite::STATUS WeightFormatHardCodePass::HardCodeMS(const AnfNodePtr &conv_node,
if (op_type == schema::PrimitiveType_Conv2D) {
param_value->set_format(schema::Format::Format_KCHW);
} else if (op_type == schema::PrimitiveType_DepthwiseConv2D) {
param_value->set_format(schema::Format::Format_CKHW);
// the format is initialized to NUM_OF_FORMAT, and set to NHWC in const folding.
if (param_value->format() == schema::Format::Format_NHWC) {
param_value->set_format(schema::Format::Format_KCHW);
} else {
param_value->set_format(schema::Format::Format_CKHW);
}
} else if (op_type == schema::PrimitiveType_DeDepthwiseConv2D) {
param_value->set_format(schema::Format::Format_CKHW);
} else if (op_type == schema::PrimitiveType_DeConv2D) {
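For DepthwiseConv2D the hard-coded weight format now depends on what the weight already looks like: if const folding has already laid it out as NHWC it is marked KCHW, otherwise the original CKHW choice is kept. A tiny standalone restatement of that branch with a stand-in Format enum:

// Stand-in restatement of the DepthwiseConv2D branch above: choose KCHW only
// when const folding already produced an NHWC weight, else keep CKHW.
enum class Format { NUM_OF_FORMAT, NHWC, KCHW, CKHW };

inline Format DecideDepthwiseWeightFormat(Format current) {
  return current == Format::NHWC ? Format::KCHW : Format::CKHW;
}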
