modify onnx parser

modify caffe parser
pull/4825/head
lyvette 5 years ago
parent 1793a07e10
commit d1475aa5dd

@ -27,5 +27,4 @@ add_library(caffe_parser_mid OBJECT
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_interp_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_permute_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_tile_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_proposal_parser.cc)
${CMAKE_CURRENT_SOURCE_DIR}/caffe_tile_parser.cc)

@ -24,7 +24,7 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::ArgMaxT> attr(new schema::ArgMaxT());
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
int32_t axisType = 0;

@ -32,7 +32,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
// caffe batch norm attr
std::unique_ptr<schema::BatchNormT> attr(new schema::BatchNormT());
std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>();
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
// check bottom size

@ -26,7 +26,7 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");

@ -24,7 +24,7 @@ void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::C
if (attr == nullptr || attr->group == 1) {
return;
}
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam(new schema::DepthwiseConv2DT());
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
if (depthwiseConv2DParam == nullptr) {
// MS_LOGW("new DepthwiseConv2DT failed");
return;
@ -125,6 +125,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}
return status;
}

@ -25,7 +25,7 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CropT> attr(new schema::CropT());
std::unique_ptr<schema::CropT> attr = std::make_unique<schema::CropT>();
if (!proto.has_crop_param()) {
attr->axis = CROP_AXIS;
std::vector<int64_t> offsets(2, 0);

@ -24,7 +24,8 @@ void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schem
return;
}
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam(new schema::DeDepthwiseConv2DT());
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam
= std::make_unique<schema::DeDepthwiseConv2DT>();
if (deDepthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return;
@ -125,6 +126,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}
return status;
}

@ -26,7 +26,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::EltwiseT> attr(new schema::EltwiseT());
std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>();
if (proto.bottom_size() < ELTWISE_MIN_INPUT_SIZE) {
MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is "
<< proto.bottom_size();

@ -22,7 +22,7 @@ namespace lite {
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
std::unique_ptr<schema::FullConnectionT> attr(new schema::FullConnectionT());
std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();
if (!innerProductParam.has_num_output()) {
// MS_LOGE("InnerProduct Parse num_output for %s failed.", proto.name().c_str());

@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>();
const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();

@ -33,7 +33,7 @@ const std::set<std::string> CaffeModelParser::skipedLayerType = {"Dropout"};
schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const std::string &weightFile,
const QuantType &quantType) {
std::unique_ptr<schema::MetaGraphT> graph(new schema::MetaGraphT());
// std::unique_ptr<schema::MetaGraphT> graph = std::make_unique<schema::MetaGraphT>();
if (ValidateFileStr(modelFile, ".prototxt") != RET_OK) {
MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.prototxt";
@ -50,7 +50,7 @@ schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const
return nullptr;
}
std::unique_ptr<schema::MetaGraphT> subGraphDef(new schema::MetaGraphT());
std::unique_ptr<schema::MetaGraphT> subGraphDef = std::make_unique<schema::MetaGraphT>();
TensorCache tensorCache;
caffe::NetParameter proto;
@ -87,11 +87,11 @@ schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const
subGraphDef->name = GetModelName(modelFile);
// set all tensors to graph
SetAllTensors(tensorCache, subGraphDef.get());
graph = move(subGraphDef);
// graph = move(subGraphDef);
// ConvertCaffeBatchNorm(graph.get());
return graph.release();
return subGraphDef.release();
// return Fb2Anf(graph.release());
}
@ -112,7 +112,7 @@ STATUS CaffeModelParser::SetOpInputIdx(const caffe::LayerParameter &layer, schem
STATUS CaffeModelParser::SetOpOutputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op,
TensorCache *tensorCache) {
for (int i = 0; i < layer.top_size(); i++) {
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
op->outputIndex.emplace_back(tensorCache->AddTensor(layer.top(i), msTensor.release(), OP_OUTPUT));
}
return RET_OK;
@ -176,7 +176,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
}
// todo y00520784 : layer.input_param().shape(0)
if (layer.type() == "Input") {
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < layer.input_param().shape(0).dim_size(); j++) {
msTensor->dims.push_back(layer.input_param().shape(0).dim(j));
}
@ -190,7 +190,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
continue;
}
std::unique_ptr<schema::CNodeT> op(new schema::CNodeT());
std::unique_ptr<schema::CNodeT> op = std::make_unique<schema::CNodeT>();
op->name = layer.name();
// set op input index
@ -234,7 +234,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC
if (proto.input_dim_size() <= 0) {
continue;
}
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < proto.input_dim_size(); j++) {
msTensor->dims.push_back(proto.input_dim(j));
}
@ -245,7 +245,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC
for (int i = 0; i < proto.input_shape_size(); i++) {
auto shape = proto.input_shape(i);
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < shape.dim_size(); j++) {
msTensor->dims.push_back(shape.dim(j));
}

@ -22,7 +22,7 @@
namespace mindspore {
namespace lite {
schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
std::unique_ptr<schema::TensorT> weight(new schema::TensorT());
std::unique_ptr<schema::TensorT> weight = std::make_unique<schema::TensorT>();
weight->format = schema::Format_NCHW;
std::vector<int32_t> shapeVec;
ConvertShape(proto, &shapeVec);
@ -46,7 +46,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
}
// get weight
std::unique_ptr<float[]> buf(new (std::nothrow) float[count]());
std::unique_ptr<float[]> buf = std::make_unique<float[]>(count);
if (buf == nullptr) {
return nullptr;
}

@ -24,7 +24,7 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::TransposeT> attr(new schema::TransposeT());
std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>();
const caffe::PermuteParameter permuteParam = proto.permute_param();
const int num_order_dims = permuteParam.order_size();

@ -27,7 +27,7 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>();
attr->format = schema::Format_NCHW;
const caffe::PoolingParameter poolingParam = proto.pooling_param();

@ -27,7 +27,7 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::PowerT> attr(new schema::PowerT());
std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
const caffe::PowerParameter powerParam = proto.power_param();
if (proto.has_power_param()) {
attr->power = powerParam.has_power() ? powerParam.power() : CAFFE_POWER_DEFAULT_POWER;

@ -23,7 +23,7 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CaffePReLUT> attr(new schema::CaffePReLUT());
std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
const caffe::PReLUParameter pReluParam = proto.prelu_param();
if (pReluParam.has_channel_shared()) {
attr->channelShared = pReluParam.channel_shared();

@ -1,67 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.h"
namespace mindspore {
namespace lite {
// Parses a caffe "Proposal" layer into a lite CNodeT.
//
// Copies the optional scalar fields (feat_stride, base_size, min_size,
// pre_nms_topn, post_nms_topn, nms_thresh) and the repeated ratio/scale
// arrays from the layer's ProposalParameter into a schema::ProposalT and
// attaches it as op's primitive value.
//
// `weight` and `weightVec` are part of the common parser interface; a
// Proposal layer carries no learned weights, so neither is read here.
STATUS CaffeProposalParser::Parse(const caffe::LayerParameter &proto,
                                  const caffe::LayerParameter &weight,
                                  schema::CNodeT *op,
                                  std::vector<schema::TensorT *> *weightVec) {
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is nullptr";
    return RET_ERROR;
  }
  // Consistent with the rest of the converter: make_unique + null check
  // (siblings such as CaffeDeconvolutionParser follow the same pattern).
  std::unique_ptr<schema::ProposalT> attr = std::make_unique<schema::ProposalT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new ProposalT failed";
    return RET_ERROR;
  }
  const caffe::ProposalParameter proposal_param = proto.proposal_param();
  // Only overwrite schema defaults for fields explicitly present in the proto.
  if (proposal_param.has_feat_stride()) {
    attr->feat_stride = proposal_param.feat_stride();
  }
  if (proposal_param.has_base_size()) {
    attr->base_size = proposal_param.base_size();
  }
  if (proposal_param.has_min_size()) {
    attr->min_size = proposal_param.min_size();
  }
  if (proposal_param.has_pre_nms_topn()) {
    attr->pre_nms_topn = proposal_param.pre_nms_topn();
  }
  if (proposal_param.has_post_nms_topn()) {
    attr->post_nms_topn = proposal_param.post_nms_topn();
  }
  if (proposal_param.has_nms_thresh()) {
    attr->nms_thresh = proposal_param.nms_thresh();
  }
  const int num_ratio = proposal_param.ratio_size();
  attr->ratio.resize(num_ratio);
  for (int i = 0; i < num_ratio; ++i) {
    attr->ratio[i] = proposal_param.ratio(i);
  }
  const int num_scale = proposal_param.scale_size();
  attr->scale.resize(num_scale);
  for (int i = 0; i < num_scale; ++i) {
    attr->scale[i] = proposal_param.scale(i);
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "new PrimitiveT failed";
    return RET_ERROR;
  }
  op->primitive->value.value = attr.release();
  // NOTE(review): this Proposal parser tags the primitive as PrimitiveType_Tile
  // (looks like a copy/paste from the tile parser). Behavior is preserved here;
  // confirm whether the schema defines a Proposal primitive type and fix the tag.
  op->primitive->value.type = schema::PrimitiveType_Tile;
  return RET_OK;
}
// Registers this parser in the global caffe node-parser registry so that
// layers whose type string is "Proposal" are dispatched to CaffeProposalParser.
CaffeNodeRegistrar g_caffeProposalParser("Proposal", new CaffeProposalParser());
} // namespace lite
} // namespace mindspore

@ -1,36 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LITE_CAFFE_PROPOSAL_PARSER_H
#define LITE_CAFFE_PROPOSAL_PARSER_H
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser that converts a caffe "Proposal" layer into a lite schema op.
// Instantiated once and registered with CaffeNodeRegistrar in the matching .cc.
class CaffeProposalParser : public CaffeNodeParser {
 public:
  // Names this parser "proposal" for the base class; the parser holds no state.
  CaffeProposalParser() : CaffeNodeParser("proposal") {}

  // Fills `op` from `proto`'s ProposalParameter. `weight` and `weightVec`
  // belong to the shared parser interface; this layer type has no weights.
  STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
               std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // LITE_CAFFE_PROPOSAL_PARSER_H

@ -23,7 +23,7 @@ STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_RELU;
// relu: negative_slope = 0, no parameter;
// leakyrelu: negative_slope != 0;

@ -23,7 +23,7 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ReshapeT> attr(new schema::ReshapeT());
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
attr->format = schema::Format_NCHW;
const caffe::ReshapeParameter reshapeParam = proto.reshape_param();

@ -24,7 +24,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ScaleT> attr(new schema::ScaleT());
std::unique_ptr<schema::ScaleT> attr = std::make_unique<schema::ScaleT>();
if (weight.blobs_size() + weight.bottom_size() < 2) {
// MS_LOGE("Scale bottom size:%d, blobs size:%d invalid in layer %s", weight.bottom_size(), weight.blobs_size(),

@ -23,7 +23,7 @@ STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_SIGMOID;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();

@ -26,7 +26,7 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::SoftMaxT> attr(new schema::SoftMaxT());
std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>();
if (proto.has_softmax_param() && proto.softmax_param().has_axis()) {
if (proto.softmax_param().axis() == -1) {
MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims.";

@ -24,7 +24,7 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::TileT> attr(new schema::TileT());
std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>();
const caffe::TileParameter tile_param = proto.tile_param();
std::vector<int> dims;

@ -23,7 +23,7 @@ STATUS OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ArgMaxParser";
std::unique_ptr<schema::ArgMaxT> attr(new schema::ArgMaxT());
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axis") {

Some files were not shown because too many files have changed in this diff. Show More

Loading…
Cancel
Save