append onnx parser

pull/7327/head
xuanyue 4 years ago
parent 25dd36059b
commit 7f42991624

@ -219,6 +219,10 @@ union PrimitiveType {
Sgd,
Adam,
GroupConv2DGradInput,
Loop,
NonMaxSuppression,
InstanceNorm,
Identity,
}
enum QuantType: int {
@ -250,6 +254,7 @@ table MetaGraph {
mempoolSize: uint;
nodes: [CNode];
allTensors: [Tensor]; // weight + input + output
subGraph : [MetaGraph];
}
root_type MetaGraph;

@ -18,8 +18,28 @@ namespace mindspore.schema;
enum ResizeMethod: byte {
UNKNOW = -1,
BILINEAR = 0,
NEAREST_NEIGHBOR = 1
LINEAR = 0,
NEAREST = 1,
CUBIC = 2
}
enum CoordinateTransformMode: byte {
COMMON = 0,
HALF_PIXEL = 1,
PYTORCH_HALF_PIXEL = 2,
TF_HALF_PIXEL = 3,
TF_CROP_AND_RESIZE = 4,
ALIGN_CORNERS = 5,
ASYMMETRIC = 6,
ALIGN_CORNERS_WITH_HALF_PIEXL = 7
}
enum NearestMode : byte {
NORMAL = 0,
ROUND_HALF_DOWN = 1,
ROUND_HALF_UP = 2,
FLOOR = 3,
CEIL = 4
}
enum Format : int {
@ -376,8 +396,13 @@ table Resize {
method: ResizeMethod;
newHeight: long;
newWidth: long;
alignCorners: bool = false;
alignCorners: bool = false; // DEPRECATED IN FUTURE: use 'coordinateTransformMode' instead.
preserveAspectRatio: bool = false;
coordinateTransformMode : CoordinateTransformMode;
cubicCoeff : float;
excludeOutside : int;
extrapolationValue : float = 0;
nearestMode : NearestMode;
}
table DetectionPostProcess {
@ -1054,3 +1079,21 @@ table FftReal {
table FftImag {
}
table NonMaxSuppression {
maxOutBoxPerClass : int = 0;
iouThreshold : float = 0;
scoreThreshold : float = 0;
centerPointBox : int = 0;
}
table InstanceNorm {
epsilon : float = 0.00001;
}
table Loop {
subGraphIndex : int;
}
table Identity {
}

@ -51,9 +51,9 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
if (this->primitive_->value.value == nullptr) {
auto attr = new (std::nothrow) schema::ResizeT();
if (prim.instance_name() == "ResizeNearestNeighbor") {
attr->method = schema::ResizeMethod_NEAREST_NEIGHBOR;
attr->method = schema::ResizeMethod_NEAREST;
} else if (prim.instance_name() == "ResizeBilinear") {
attr->method = schema::ResizeMethod_BILINEAR;
attr->method = schema::ResizeMethod_LINEAR;
} else {
MS_LOG(ERROR) << "wrong resize type";
return RET_ERROR;

@ -41,8 +41,8 @@ int ResizeBaseCPUKernel::CheckParameters() {
return RET_NULL_PTR;
}
method_ = parameter->method_;
if (method_ != static_cast<int>(schema::ResizeMethod_BILINEAR) &&
method_ != static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR)) {
if (method_ != static_cast<int>(schema::ResizeMethod_LINEAR) &&
method_ != static_cast<int>(schema::ResizeMethod_NEAREST)) {
MS_LOG(ERROR) << "Resize method should be bilinear or nearest_neighbor, but got " << method_;
return RET_INVALID_OP_ATTR;
}

@ -14,11 +14,11 @@
* limitations under the License.
*/
#include <algorithm>
#include "src/runtime/kernel/arm/fp32/resize.h"
#include "schema/model_generated.h"
#include "nnacl/fp32/resize.h"
#include <algorithm>
#include "include/errorcode.h"
#include "nnacl/fp32/resize.h"
#include "schema/model_generated.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
@ -41,7 +41,7 @@ int ResizeCPUKernel::Init() {
int ResizeCPUKernel::ReSize() {
int ret = RET_OK;
if (method_ == static_cast<int>(schema::ResizeMethod_BILINEAR)) {
if (method_ == static_cast<int>(schema::ResizeMethod_LINEAR)) {
FreeTmpBuffer();
ret = MallocTmpBuffer();
if (ret != RET_OK) {
@ -162,7 +162,7 @@ int ResizeCPUKernel::RunImpl(int task_id) {
int ret = 0;
switch (method_) {
case static_cast<int>(schema::ResizeMethod_BILINEAR): {
case static_cast<int>(schema::ResizeMethod_LINEAR): {
int n_h_begin, n_h_end;
int n = out_tensors_.at(0)->shape()[0];
int h = new_height_;
@ -178,7 +178,7 @@ int ResizeCPUKernel::RunImpl(int task_id) {
break;
}
case static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR): {
case static_cast<int>(schema::ResizeMethod_NEAREST): {
if (in_tensors_.size() == lite::kDoubleNum && !const_shape_) {
auto out_shape = in_tensors_.at(1);
auto data = reinterpret_cast<int32_t *>(out_shape->MutableData());

@ -14,12 +14,12 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/resize_int8.h"
#include <vector>
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/int8/resize.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "src/runtime/kernel/arm/int8/resize_int8.h"
#include "src/kernel_registry.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
@ -84,7 +84,7 @@ int ResizeInt8CPUKernel::RunImpl(int task_id) {
int ret = 0;
switch (method_) {
case static_cast<int>(schema::ResizeMethod_BILINEAR): {
case static_cast<int>(schema::ResizeMethod_LINEAR): {
if (quant_in_->zp_ == 0) {
ret = ResizeBilinearInt8(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(),
align_corners_, quant_in_, quant_out_, multiplier_, task_id, context_->thread_num_);
@ -95,7 +95,7 @@ int ResizeInt8CPUKernel::RunImpl(int task_id) {
}
break;
}
case static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR): {
case static_cast<int>(schema::ResizeMethod_NEAREST): {
bool same_zp = quant_in_->zp_ == quant_out_->zp_;
bool same_scale = abs(quant_out_->scale_ - quant_in_->scale_) < 1e-6;
if (same_zp && same_scale) {

@ -14,12 +14,12 @@
* limitations under the License.
*/
#include "src/runtime/kernel/opencl/kernel/resize.h"
#include <map>
#include <set>
#include <string>
#include <map>
#include "include/errorcode.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/opencl/kernel/resize.h"
#include "src/runtime/kernel/opencl/cl/resize.cl.inc"
using mindspore::kernel::KERNEL_ARCH::kGPU;
@ -46,9 +46,9 @@ int ResizeOpenCLKernel::Init() {
return RET_PARAM_INVALID;
}
std::string kernel_name = "resize";
if (resize_param->method_ == schema::ResizeMethod_BILINEAR) {
if (resize_param->method_ == schema::ResizeMethod_LINEAR) {
kernel_name += "_bilinear";
} else if (resize_param->method_ == schema::ResizeMethod_NEAREST_NEIGHBOR) {
} else if (resize_param->method_ == schema::ResizeMethod_NEAREST) {
kernel_name += "_nearest_neighbor";
} else {
MS_LOG(ERROR) << "unsupported resize method:" << resize_param->method_;

@ -14,11 +14,11 @@
* limitations under the License.
*/
#include <vector>
#include "common/common_test.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
#include "mindspore/lite/src/tensor.h"
#include "common/common_test.h"
#include "nnacl/resize_parameter.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "schema/ops_generated.h"
using mindspore::schema::Format_NHWC;
@ -62,7 +62,7 @@ void TestResizeBilinearFp32::Prepare(const std::vector<int> &input_shape, const
out_tensor_.SetData(output_data);
ResizeParameter param_ = {
{}, static_cast<int>(schema::ResizeMethod_BILINEAR), output_shape[1], output_shape[2], align_corners};
{}, static_cast<int>(schema::ResizeMethod_LINEAR), output_shape[1], output_shape[2], align_corners};
desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Resize};
ctx_ = lite::InnerContext();
ctx_.thread_num_ = thread_num;

@ -16,7 +16,7 @@
#include <vector>
#include "common/common_test.h"
#include "nnacl/resize_parameter.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "src/kernel_registry.h"
namespace mindspore {
@ -57,7 +57,7 @@ void TestResizeNearestNeighborFp32::Prepare(const std::vector<int> &input_shape,
out_tensor_.SetData(output_data);
ResizeParameter param_ = {
{}, static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR), output_shape[1], output_shape[2], align_corners};
{}, static_cast<int>(schema::ResizeMethod_NEAREST), output_shape[1], output_shape[2], align_corners};
desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Resize};
ctx_ = lite::InnerContext();
ctx_.thread_num_ = thread_num;

@ -19,7 +19,7 @@
#include "include/context.h"
#include "src/tensor.h"
#include "common/common_test.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "src/kernel_registry.h"
#include "nnacl/int8/resize.h"
namespace mindspore {
@ -68,7 +68,7 @@ void TestResizeBilinearInt8::Prepare(const std::vector<int> &in_shape, const std
inputs.push_back(&in_tensor);
outputs.push_back(&out_tensor);
param_.method_ = static_cast<int>(schema::ResizeMethod_BILINEAR);
param_.method_ = static_cast<int>(schema::ResizeMethod_LINEAR);
param_.new_width_ = out_shape[2];
param_.new_height_ = out_shape[1];
param_.align_corners_ = align_corners;

@ -19,7 +19,7 @@
#include "include/context.h"
#include "src/tensor.h"
#include "common/common_test.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "src/kernel_registry.h"
#include "nnacl/int8/resize.h"
namespace mindspore {
@ -63,7 +63,7 @@ void TestResizeNearestNeighborInt8::Prepare(const std::vector<int> &in_shape, co
inputs.push_back(&in_tensor);
outputs.push_back(&out_tensor);
param_.method_ = static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR);
param_.method_ = static_cast<int>(schema::ResizeMethod_NEAREST);
param_.new_width_ = out_shape[2];
param_.new_height_ = out_shape[1];
param_.align_corners_ = align_corners;

@ -15,13 +15,13 @@
*/
#include <iostream>
#include <memory>
#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/common/file_utils.h"
#include "mindspore/lite/src/runtime/opencl/opencl_runtime.h"
#include "mindspore/lite/src/runtime/kernel/opencl/subgraph_opencl_kernel.h"
#include "mindspore/lite/src/runtime/kernel/opencl/kernel/resize.h"
#include "mindspore/lite/test/ut/src/runtime/kernel/opencl/utils_tests.h"
#include "src/common/file_utils.h"
#include "src/common/log_adapter.h"
#include "src/runtime/kernel/opencl/kernel/resize.h"
#include "src/runtime/kernel/opencl/subgraph_opencl_kernel.h"
#include "src/runtime/opencl/opencl_runtime.h"
#include "test/ut/src/runtime/kernel/opencl/utils_tests.h"
namespace mindspore {
class TestResizeOpenCL : public mindspore::CommonTest {
@ -119,7 +119,7 @@ TEST_F(TestResizeOpenCL, ResizeBilinearFp32) {
std::vector<float> input_data = {0.0f, 1.0f, 2.0f, 3.0f};
std::vector<float> output_data = {0.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.5f, 2.0f, 2.0f,
2.0f, 2.5f, 3.0f, 3.0f, 2.0f, 2.5f, 3.0f, 3.0f};
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_BILINEAR, align_corners);
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_LINEAR, align_corners);
}
TEST_F(TestResizeOpenCL, ResizeBilinearFp16) {
@ -134,7 +134,7 @@ TEST_F(TestResizeOpenCL, ResizeBilinearFp16) {
std::vector<float16_t> input_data = {0.0f, 1.0f, 2.0f, 3.0f};
std::vector<float16_t> output_data = {0.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.5f, 2.0f, 2.0f,
2.0f, 2.5f, 3.0f, 3.0f, 2.0f, 2.5f, 3.0f, 3.0f};
RunTestCaseResize(shape, input_data.data(), output_data.data(), true, schema::ResizeMethod_BILINEAR, align_corners);
RunTestCaseResize(shape, input_data.data(), output_data.data(), true, schema::ResizeMethod_LINEAR, align_corners);
}
TEST_F(TestResizeOpenCL, ResizeBilinearAlignFp32) {
@ -148,7 +148,7 @@ TEST_F(TestResizeOpenCL, ResizeBilinearAlignFp32) {
std::vector<int> shape = {n, h, w, oh, ow, c};
std::vector<float> input_data = {0.0f, 1.0f, 2.0f, 3.0f};
std::vector<float> output_data = {0.0f, 0.5f, 1.0f, 1.0f, 1.5f, 2.0f, 2.0f, 2.5f, 3.0f};
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_BILINEAR, align_corners);
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_LINEAR, align_corners);
}
TEST_F(TestResizeOpenCL, ResizeNearestNeighborFp32) {
@ -163,8 +163,7 @@ TEST_F(TestResizeOpenCL, ResizeNearestNeighborFp32) {
std::vector<float> input_data = {0.0f, 1.0f, 2.0f, 3.0f};
std::vector<float> output_data = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f,
2.0f, 2.0f, 3.0f, 3.0f, 2.0f, 2.0f, 3.0f, 3.0f};
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_NEAREST_NEIGHBOR,
align_corners);
RunTestCaseResize(shape, input_data.data(), output_data.data(), false, schema::ResizeMethod_NEAREST, align_corners);
}
TEST_F(TestResizeOpenCL, ResizeNearestNeighborFp16) {
@ -179,7 +178,6 @@ TEST_F(TestResizeOpenCL, ResizeNearestNeighborFp16) {
std::vector<float16_t> input_data = {0.0f, 1.0f, 2.0f, 3.0f};
std::vector<float16_t> output_data = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f,
2.0f, 2.0f, 3.0f, 3.0f, 2.0f, 2.0f, 3.0f, 3.0f};
RunTestCaseResize(shape, input_data.data(), output_data.data(), true, schema::ResizeMethod_NEAREST_NEIGHBOR,
align_corners);
RunTestCaseResize(shape, input_data.data(), output_data.data(), true, schema::ResizeMethod_NEAREST, align_corners);
}
} // namespace mindspore

@ -40,7 +40,7 @@ TEST_F(TestTfliteParserResizeNN, AttrValue) {
ASSERT_EQ(val->newWidth, 100);
ASSERT_EQ(val->format, schema::Format_NHWC);
ASSERT_EQ(val->preserveAspectRatio, false);
ASSERT_EQ(val->method, schema::ResizeMethod_NEAREST_NEIGHBOR);
ASSERT_EQ(val->method, schema::ResizeMethod_NEAREST);
}
class TestTfliteParserResizeBilinear : public TestTfliteParser {
@ -64,7 +64,7 @@ TEST_F(TestTfliteParserResizeBilinear, AttrValue) {
ASSERT_EQ(val->newWidth, 4);
ASSERT_EQ(val->format, schema::Format_NHWC);
ASSERT_EQ(val->preserveAspectRatio, false);
ASSERT_EQ(val->method, schema::ResizeMethod_BILINEAR);
ASSERT_EQ(val->method, schema::ResizeMethod_LINEAR);
}
} // namespace mindspore

@ -57,7 +57,7 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe:
attr->newWidth = width;
}
attr->alignCorners = true;
attr->method = schema::ResizeMethod_BILINEAR;
attr->method = schema::ResizeMethod_LINEAR;
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Resize;

@ -582,6 +582,94 @@ STATUS OnnxSignParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
return RET_OK;
}
// Converts an ONNX "And" node into a schema::PrimitiveType_LogicalAnd
// primitive. The operator carries no attributes, so only the union tag and an
// empty attribute table are populated.
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxAndParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx AndParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (!op->primitive) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::LogicalAndT>();
  if (!attr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Hand the attribute table to the flatbuffer union and tag it.
  op->primitive->value.value = attr.release();
  op->primitive->value.type = schema::PrimitiveType_LogicalAnd;
  return RET_OK;
}
// Converts an ONNX "Or" node into a schema::PrimitiveType_LogicalOr
// primitive. The operator carries no attributes, so only the union tag and an
// empty attribute table are populated.
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxOrParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx OrParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (!op->primitive) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::LogicalOrT>();
  if (!attr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Hand the attribute table to the flatbuffer union and tag it.
  op->primitive->value.value = attr.release();
  op->primitive->value.type = schema::PrimitiveType_LogicalOr;
  return RET_OK;
}
// Converts an ONNX "Not" node into a schema::PrimitiveType_LogicalNot
// primitive. The operator carries no attributes, so only the union tag and an
// empty attribute table are populated.
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxNotParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx NotParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (!op->primitive) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::LogicalNotT>();
  if (!attr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Hand the attribute table to the flatbuffer union and tag it.
  op->primitive->value.value = attr.release();
  op->primitive->value.type = schema::PrimitiveType_LogicalNot;
  return RET_OK;
}
// Converts an ONNX "Round" node into a schema::PrimitiveType_Round
// primitive. The operator carries no attributes, so only the union tag and an
// empty attribute table are populated.
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxRoundParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                              schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx RoundParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (!op->primitive) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::RoundT>();
  if (!attr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Hand the attribute table to the flatbuffer union and tag it.
  op->primitive->value.value = attr.release();
  op->primitive->value.type = schema::PrimitiveType_Round;
  return RET_OK;
}
OnnxNodeRegistrar g_onnxAddParser("Add", new OnnxAddParser());
OnnxNodeRegistrar g_onnxInt8AddParser("Int8Add", new OnnxAddParser());
OnnxNodeRegistrar g_onnxSubParser("Sub", new OnnxSubParser());
@ -608,5 +696,9 @@ OnnxNodeRegistrar g_onnxAtanParser("Atan", new OnnxAtanParser());
// Registrations binding ONNX op-type strings to their parser instances.
OnnxNodeRegistrar g_onnxAsinParser("Asin", new OnnxAsinParser());
OnnxNodeRegistrar g_onnxTanhParser("Tanh", new OnnxTanhParser());
// Fix: "Sign" was registered with an OnnxTanhParser instance, so ONNX Sign
// nodes would have been converted as Tanh primitives.
OnnxNodeRegistrar g_onnxSignParser("Sign", new OnnxSignParser());
OnnxNodeRegistrar g_onnxAndParser("And", new OnnxAndParser());
OnnxNodeRegistrar g_onnxOrParser("Or", new OnnxOrParser());
OnnxNodeRegistrar g_onnxNotParser("Not", new OnnxNotParser());
OnnxNodeRegistrar g_onnxRoundParser("Round", new OnnxRoundParser());
} // namespace lite
} // namespace mindspore

@ -171,6 +171,30 @@ class OnnxSignParser : public OnnxNodeParser {
OnnxSignParser() : OnnxNodeParser("Sign") {}
STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
// Parser for the ONNX "And" operator; Parse emits PrimitiveType_LogicalAnd.
class OnnxAndParser : public OnnxNodeParser {
 public:
  OnnxAndParser() : OnnxNodeParser("And") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};

// Parser for the ONNX "Or" operator; Parse emits PrimitiveType_LogicalOr.
class OnnxOrParser : public OnnxNodeParser {
 public:
  OnnxOrParser() : OnnxNodeParser("Or") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};

// Parser for the ONNX "Not" operator; Parse emits PrimitiveType_LogicalNot.
class OnnxNotParser : public OnnxNodeParser {
 public:
  OnnxNotParser() : OnnxNodeParser("Not") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};

// Parser for the ONNX "Round" operator; Parse emits PrimitiveType_Round.
class OnnxRoundParser : public OnnxNodeParser {
 public:
  OnnxRoundParser() : OnnxNodeParser("Round") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ARITHMETIC_OPREATION_PARSER_H

@ -15,9 +15,9 @@
*/
#include "tools/converter/parser/onnx/onnx_conv_parser.h"
#include <vector>
#include <memory>
#include <algorithm>
#include <memory>
#include <vector>
namespace mindspore {
namespace lite {
@ -176,9 +176,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
MS_LOG(ERROR) << "Convert Convolution to Depthwise failed";
return RET_ERROR;
}
} else if (attr->group != 1) {
MS_LOG(ERROR) << "group conv hasn't supported";
return RET_NOT_SUPPORT;
} else {
op->primitive->value.type = schema::PrimitiveType_Conv2D;
op->primitive->value.value = attr.release();

@ -0,0 +1,49 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/onnx/onnx_identity_parser.h"
#include <memory>
#include <vector>
namespace mindspore {
namespace lite {
// Converts an ONNX "Identity" node into a schema::PrimitiveType_Identity
// primitive. Identity carries no attributes, so only the union tag and an
// empty attribute table are populated.
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxIdentityParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                 schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx IdentityParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (!op->primitive) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::IdentityT>();
  if (!attr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Hand the attribute table to the flatbuffer union and tag it.
  op->primitive->value.value = attr.release();
  op->primitive->value.type = schema::PrimitiveType_Identity;
  return RET_OK;
}
OnnxNodeRegistrar g_onnxIdentityParser("Identity", new OnnxIdentityParser());
} // namespace lite
} // namespace mindspore

@ -0,0 +1,33 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_IDENTITY_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_IDENTITY_PARSER_H
#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the ONNX "Identity" operator; Parse emits PrimitiveType_Identity.
class OnnxIdentityParser : public OnnxNodeParser {
 public:
  OnnxIdentityParser() : OnnxNodeParser("Identity") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_IDENTITY_PARSER_H

@ -0,0 +1,55 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/onnx/onnx_instance_norm_parser.h"
#include <memory>
namespace mindspore {
namespace lite {
// Converts an ONNX "InstanceNormalization" node into a
// schema::PrimitiveType_InstanceNorm primitive, reading the optional
// "epsilon" attribute (schema default 1e-5 applies when absent).
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxInstanceNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                     schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx InstanceNormParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::InstanceNormT> attr = std::make_unique<schema::InstanceNormT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Fix: scan all attributes rather than only attribute(0) — ONNX does not
  // guarantee attribute ordering, so "epsilon" may appear at any index.
  for (const auto &onnx_node_attr : onnx_node.attribute()) {
    if (onnx_node_attr.name() == "epsilon") {
      attr->epsilon = onnx_node_attr.f();
    }
  }
  op->primitive->value.type = schema::PrimitiveType_InstanceNorm;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
OnnxNodeRegistrar g_onnxInstanceNormParser("InstanceNormalization", new OnnxInstanceNormParser());
} // namespace lite
} // namespace mindspore

@ -0,0 +1,33 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_INSTANCE_NORM_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_INSTANCE_NORM_PARSER_H
#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser producing schema::PrimitiveType_InstanceNorm; the .cc registers it
// for the ONNX op type "InstanceNormalization".
// NOTE(review): the name passed to the base class here is "InstanceNorm"
// while the registrar uses "InstanceNormalization" — confirm the constructor
// argument is only a label and is not used for op-type matching.
class OnnxInstanceNormParser : public OnnxNodeParser {
 public:
  OnnxInstanceNormParser() : OnnxNodeParser("InstanceNorm") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_INSTANCE_NORM_PARSER_H

@ -26,6 +26,7 @@
#include <vector>
#include <memory>
#include <set>
#include <map>
#include "securec/include/securec.h"
#include "tools/converter/model_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"
@ -40,6 +41,7 @@ class OnnxModelParser : public ModelParser {
virtual ~OnnxModelParser();
schema::MetaGraphT *ParseGraph(const onnx::GraphProto &graph, const QuantType &quantType = QuantType_QUANT_NONE);
schema::MetaGraphT *ParseToFb(const std::string &modelFile, const std::string &weightFile,
const QuantType &quantType = QuantType_QUANT_NONE) override;
@ -62,7 +64,7 @@ class OnnxModelParser : public ModelParser {
STATUS ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *dst_op, schema::TensorT *dst_tensor, TensorCache *tensor_cache,
const QuantType &quantType);
const QuantType &quantType, schema::MetaGraphT *dst_graph);
void ParseOnnxGemmNode(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::MetaGraphT *graph, TensorCache *tensor_cache, const QuantType &quant_type);
@ -86,9 +88,13 @@ class OnnxModelParser : public ModelParser {
void FindGraphInputAndConst(const onnx::GraphProto &onnx_graph);
STATUS ParseLoopAttr(schema::CNodeT *dst_op, const onnx::NodeProto &onnx_node, const QuantType &quantType,
schema::MetaGraphT *dst_graph);
private:
std::vector<string> graphInputNames;
std::vector<string> graphConstNames;
std::vector<std::string> graphInputNames;
std::vector<std::string> graphConstNames;
int subGraphNum = 0;
};
} // namespace lite
} // namespace mindspore

@ -0,0 +1,80 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/onnx/onnx_non_max_suppression_parser.h"
#include <algorithm>
#include <memory>
namespace mindspore {
namespace lite {
// Converts an ONNX "NonMaxSuppression" node into a
// schema::PrimitiveType_NonMaxSuppression primitive.
//
// ONNX passes max_output_boxes_per_class / iou_threshold / score_threshold as
// optional *inputs* (typically constant initializers), not attributes, so
// they are looked up in the graph initializer list by tensor name; only
// center_point_box is a true attribute. Missing values keep the schema
// defaults (0).
// Returns RET_NULL_PTR if `op` or an allocation is unavailable, RET_OK otherwise.
STATUS OnnxNonMaxSuppressionParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
                                          schema::CNodeT *op) {
  MS_LOG(DEBUG) << "onnx NonMaxSuppressionParser";  // fix: was copy-pasted as "onnx EluParser"
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::NonMaxSuppressionT> attr = std::make_unique<schema::NonMaxSuppressionT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new op failed";
    return RET_NULL_PTR;
  }
  // Optional input 2: max_output_boxes_per_class (int64 scalar initializer).
  if (onnx_node.input_size() > 2) {
    auto it = std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(),
                           [&](const onnx::TensorProto &it) { return it.name() == onnx_node.input(2); });
    if (it != onnx_graph.initializer().end()) {
      attr->maxOutBoxPerClass = it->int64_data(0);
    }
  }
  // Optional input 3: iou_threshold (float scalar initializer).
  if (onnx_node.input_size() > 3) {
    auto it = std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(),
                           [&](const onnx::TensorProto &it) { return it.name() == onnx_node.input(3); });
    if (it != onnx_graph.initializer().end()) {
      attr->iouThreshold = it->float_data(0);
    }
  }
  // Optional input 4: score_threshold (float scalar initializer).
  if (onnx_node.input_size() > 4) {
    auto it = std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(),
                           [&](const onnx::TensorProto &it) { return it.name() == onnx_node.input(4); });
    if (it != onnx_graph.initializer().end()) {
      attr->scoreThreshold = it->float_data(0);
    }
  }
  for (const auto &onnx_node_attr : onnx_node.attribute()) {
    const auto &attribute_name = onnx_node_attr.name();
    if (attribute_name == "center_point_box") {
      if (onnx_node_attr.has_i()) {
        attr->centerPointBox = onnx_node_attr.i();
      }
    }
  }
  // Fix: the union tag must match the payload type. This previously used
  // PrimitiveType_Elu while storing a NonMaxSuppressionT payload, which would
  // corrupt the primitive on (de)serialization.
  op->primitive->value.type = schema::PrimitiveType_NonMaxSuppression;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
OnnxNodeRegistrar g_onnxNonMaxSuppressionParser("NonMaxSuppression", new OnnxNonMaxSuppressionParser());
} // namespace lite
} // namespace mindspore

@ -0,0 +1,33 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NON_MAX_SUPPRESSION_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NON_MAX_SUPPRESSION_PARSER_H
#include "tools/converter/parser/onnx/onnx_node_parser.h"
#include "tools/converter/parser/onnx/onnx_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the ONNX "NonMaxSuppression" operator; Parse fills a
// schema::NonMaxSuppressionT from the node's optional inputs/attribute.
class OnnxNonMaxSuppressionParser : public OnnxNodeParser {
 public:
  OnnxNonMaxSuppressionParser() : OnnxNodeParser("NonMaxSuppression") {}
  STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NON_MAX_SUPPRESSION_PARSER_H

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save