!7344 [lite] fix unsupported op print and caffe multiple inputs

Merge pull request !7344 from 徐安越/r1.0
pull/7344/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 093653628b

@@ -243,12 +243,12 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
 auto status_node = nodeParser->Parse(layer, layerP, op.get(), &weightVec);
 if (status_node != RET_OK) {
 interrupt = true;
-if (status_node == RET_NOT_SUPPORT) {
+if (status_node == RET_NOT_FIND_OP) {
 NoSupportOp::GetInstance()->InsertOp(layer.type());
 } else {
 MS_LOG(ERROR) << "Parse weight for " << layer.name() << " Failed!";
 }
-status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+status = (status == RET_OK ? status_node : status);
 continue;
 }
@@ -263,7 +263,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
 if (status_node != RET_OK) {
 interrupt = true;
 MS_LOG(ERROR) << "Set Op " << layer.name() << " Output Index Failed!";
-status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+status = (status == RET_OK ? status_node : status);
 continue;
 }
@@ -280,8 +280,15 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC
 continue;
 }
 std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
-for (int j = 0; j < proto.input_dim_size(); j++) {
-msTensor->dims.push_back(proto.input_dim(j));
-}
+if (proto.input_dim_size() > 4) {
+int step = proto.input_dim_size() / proto.input_size();
+for (int j = i * step; j < (i + 1) * step; j++) {
+msTensor->dims.push_back(proto.input_dim(j));
+}
+} else {
+for (int j = 0; j < proto.input_dim_size(); j++) {
+msTensor->dims.push_back(proto.input_dim(j));
+}
+}
 msTensor->refCount = schema::NodeType::NodeType_ValueNode;
 msTensor->dataType = kNumberTypeFloat32;

@@ -156,8 +156,8 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
 if (attr->group != 1) {
 if (!ParseGroupDeConvolution(attr, op)) {
-MS_LOG(ERROR) << "Convert DeConvolution to DeDepthwise failed";
-return RET_ERROR;
+MS_LOG(ERROR) << "Convert DeConvolution to DeDepthwise failed, generalized group deconv hasn't support";
+return RET_NOT_SUPPORT;
 }
 } else {
 op->primitive->value.type = schema::PrimitiveType_DeConv2D;

@@ -271,7 +271,7 @@ STATUS OnnxModelParser::ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph,
 auto status = node_parser->Parse(onnx_graph, onnx_node, dst_op);
 if (status != RET_OK) {
 interrupt = true;
-if (status == RET_NOT_SUPPORT) {
+if (status == RET_NOT_FIND_OP) {
 NoSupportOp::GetInstance()->InsertOp(onnx_node.op_type());
 } else {
 MS_LOG(ERROR) << "parser onnx node " << onnx_node.op_type() << " attr failed";

@@ -100,10 +100,14 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
 for (const auto &tflite_op : tflite_subgraph->operators) {
 auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code;
 auto op_type = GetMSOpType(tflite_op_type);
-if (op_type == "CUSTOM") {
+if (op_type == "Custom") {
 auto custom_type = (tflite_model->operator_codes[tflite_op->opcode_index])->custom_code;
-MS_LOG(ERROR) << "CUSTOM op is not supported, the type is " << custom_type;
-return RET_ERROR;
+if (custom_type != "TFLite_Detection_PostProcess") {
+MS_LOG(ERROR) << "CUSTOM op is not supported, the type is " << custom_type;
+NoSupportOp::GetInstance()->InsertOp(custom_type);
+status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+continue;
+}
 }
 auto op = std::make_unique<schema::CNodeT>();
@@ -121,7 +125,7 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
 status = node_parser->Parse(tflite_op, tflite_subgraph->tensors, tflite_model->buffers, op.get(), &tensorsId,
 &tensorsFormat, &tensorsIdMap);
 if (status != RET_OK) {
-if (status == RET_NOT_SUPPORT) {
+if (status == RET_NOT_FIND_OP) {
 NoSupportOp::GetInstance()->InsertOp(op_type);
 } else {
 MS_LOG(ERROR) << "node " << op_type.c_str() << " parser failed";

@@ -73,11 +73,11 @@ STATUS TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
 break;
 default:
 MS_LOG(ERROR) << "paddingmode:" << tflite_attr->mode << " don't support";
-return RET_INVALID_OP_ATTR;
+return RET_NOT_SUPPORT;
 }
 } else {
 MS_LOG(ERROR) << "this pad:" << node_name << " hasn't been supported";
-return RET_NOT_SUPPORT;
+return RET_NOT_FIND_OP;
 }
 op->primitive->value.type = schema::PrimitiveType_Pad;

@@ -72,7 +72,7 @@ STATUS TfliteReduceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
 } else if (std::strcmp(node_name, "ReduceAny") == 0) {
 // attr->mode;
 MS_LOG(ERROR) << "ms-lite haven't supported REDUCE_ANY now";
-return RET_NOT_FIND_OP;
+return RET_NOT_SUPPORT;
 }
 if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->axes)) {

Loading…
Cancel
Save