!4747 modify caffe & tflite parsers format

Merge pull request !4747 from lyvette/tflite_parser
pull/4747/MERGE
mindspore-ci-bot committed via Gitee, 5 years ago
commit 20e80c56b3

@ -23,11 +23,25 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
MS_LOG(DEBUG) << "parse CaffeArgMaxParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
int32_t axisType = 0;
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
int32_t axisType;
int32_t axis = 0;
if (!argmaxParam.has_axis()) {
axisType = 2;
@ -35,20 +49,19 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
axisType = 1;
axis = (int64_t)argmaxParam.axis();
if (axis == -1) {
// MS_LOGE("axis with -1 may lead to calculation errors when input less than 4 dims.");
MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
return RET_ERROR;
}
}
attr->axis = axis;
attr->axisType = axisType;
attr->outMaxValue = argmaxParam.out_max_val();
attr->topK = argmaxParam.top_k();
attr->keepDims = true;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_ArgMax;
op->primitive->value.value = attr.release();
return RET_OK;
}
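
The ArgMax hunk above shows the skeleton this change repeats in every Caffe node parser: null-check the output node, allocate and check op->primitive, allocate and check the attribute, fill its fields, and only then set the primitive type and release the attribute into it. The following is a condensed, self-contained sketch of that skeleton; the types, STATUS values and logging are simplified stand-ins for the generated schema classes and MS_LOG used in the real converter.

// Condensed sketch of the per-parser skeleton introduced by this change.
// All types below are stand-ins, not the real schema::* flatbuffer classes.
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

enum STATUS { RET_OK = 0, RET_ERROR = 1, RET_NULL_PTR = 2 };

struct ArgMaxT { int32_t axis = 0; int32_t axisType = 0; bool keepDims = false; };
struct PrimitiveValueT { int type = 0; ArgMaxT *value = nullptr; };
struct PrimitiveT { PrimitiveValueT value; ~PrimitiveT() { delete value.value; } };
struct CNodeT { std::string name; std::unique_ptr<PrimitiveT> primitive; };

STATUS ParseArgMaxLike(const std::string &layer_name, CNodeT *op) {
  if (op == nullptr) {                               // 1. validate the output node
    std::cerr << "op is null\n";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<PrimitiveT>();    // 2. allocate the primitive
  if (op->primitive == nullptr) {
    std::cerr << "op->primitive is null\n";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<ArgMaxT>();           // 3. allocate the attribute
  if (attr == nullptr) {
    std::cerr << "new op failed\n";
    return RET_NULL_PTR;
  }
  attr->axisType = 2;                                // 4. fill attribute fields
  attr->keepDims = true;
  op->name = layer_name;                             // 5. name, type, then hand ownership
  op->primitive->value.type = 1;                     //    of attr over to the primitive
  op->primitive->value.value = attr.release();
  return RET_OK;
}

int main() {
  CNodeT node;
  return ParseArgMaxLike("argmax_layer", &node) == RET_OK ? 0 : 1;
}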

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeArgMaxParser : public CaffeNodeParser {
public:
CaffeArgMaxParser() : CaffeNodeParser("argmax") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_

@ -14,9 +14,9 @@
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h"
#include <cmath>
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h"
#include "tools/common/tensor_util.h"
#define CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT 0.00001
@ -28,13 +28,29 @@ static const int CAFFE_BATCHNORMAL_TOP_SIZE = 1;
namespace mindspore {
namespace lite {
using STATUS = int;
STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
// caffe batch norm attr
STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeBatchNormParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>();
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
// check bottom size
if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \
@ -50,7 +66,8 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
}
if (batchNormParam.has_eps()) {
if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps())
< CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
} else {
auto tmpAuto = batchNormParam.eps();
@ -67,7 +84,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
// parse weight gamma
auto gamma = ConvertWeight(weight.blobs(0));
if (gamma == nullptr) {
// MS_LOGE("Convert blobs(0) for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert blobs(0) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
@ -82,7 +99,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
// parse weight beta
auto beta = ConvertWeight(weight.blobs(1));
if (beta == nullptr) {
// MS_LOGE("Convert blobs(1) for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert blobs(1) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
@ -94,10 +111,9 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
estimatedVariance = nullptr;
weightVec->push_back(beta);
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_BatchNorm;
op->primitive->value.value = attr.release();
return RET_OK;
}
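
The epsilon handling in the batch-norm hunk above keeps Caffe's default value when the model's eps is within a tight tolerance of it, and otherwise takes the model's value. A minimal sketch of that decision follows; the tolerance constant's value is assumed here, since only CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT (0.00001) appears in the diff.

// Minimal sketch of the batch-norm epsilon selection shown above.
#include <cmath>
#include <iostream>

constexpr double kCaffeBatchNormEpsDefault = 0.00001;      // CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT
constexpr double kCaffeBatchNormEpsDiffTol = 0.000000001;  // stand-in for the _DIFF_ tolerance (value assumed)

double ResolveEpsilon(bool has_eps, double model_eps) {
  if (!has_eps) {
    return kCaffeBatchNormEpsDefault;      // no eps in the model: keep Caffe's default
  }
  if (std::fabs(kCaffeBatchNormEpsDefault - model_eps) < kCaffeBatchNormEpsDiffTol) {
    return kCaffeBatchNormEpsDefault;      // effectively the default: keep it exactly
  }
  return model_eps;                        // otherwise use the model's value
}

int main() {
  std::cout << ResolveEpsilon(true, 1e-5) << "\n";  // 1e-05, default kept
  std::cout << ResolveEpsilon(true, 1e-3) << "\n";  // 0.001, model value used
  return 0;
}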

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeBatchNormParser : public CaffeNodeParser {
public:
CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_

@ -14,8 +14,8 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h"
#include <memory>
const int32_t CONCAT_DEFAULT_AXIS = 1;
@ -25,33 +25,48 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
MS_LOG(DEBUG) << "parse CaffeConcatParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");
MS_LOG(ERROR) << "Concat param in caffe have concat_dim and axis simultaneously, return fail";
return RET_ERROR;
}
if (concatParam.has_concat_dim()) {
// MS_LOGD("Concat dim , set axis:%d", concatParam.concat_dim());
MS_LOG(DEBUG) << "Concat dim , set axis: " << concatParam.concat_dim();
int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
if (concat_dim_value < 0) {
// MS_LOGE("concat_dim value in model is smaller than 0:%d", concat_dim_value);
MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << concat_dim_value;
return RET_ERROR;
}
attr->axis = concat_dim_value;
} else if (concatParam.has_axis()) {
// MS_LOGD("axis , set axis:%d", concatParam.axis());
MS_LOG(DEBUG) << "axis , set axis: " << concatParam.axis();
int32_t tmpInt = (int32_t)concatParam.axis();
attr->axis = tmpInt;
} else {
// MS_LOGD("default , set axis:%d", CONCAT_DEFAULT_AXIS);
MS_LOG(DEBUG) << "default , set axis: " << CONCAT_DEFAULT_AXIS;
attr->axis = CONCAT_DEFAULT_AXIS;
}
attr->n = proto.bottom_size();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Concat;
op->primitive->value.value = attr.release();
return RET_OK;
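
The concat hunk above resolves the axis from three mutually exclusive sources: concat_dim (rejected if negative), axis, or the default of 1 when neither is present. A small stand-alone sketch of that selection logic, with std::optional standing in for the protobuf has_* accessors:

// Sketch of the concat axis selection shown above (simplified stand-in types).
#include <cstdint>
#include <iostream>
#include <optional>

constexpr int32_t kConcatDefaultAxis = 1;  // CONCAT_DEFAULT_AXIS in the diff

// Returns the resolved axis, or std::nullopt when the parameters are invalid.
std::optional<int32_t> ResolveConcatAxis(std::optional<int32_t> concat_dim,
                                         std::optional<int32_t> axis) {
  if (concat_dim.has_value() && axis.has_value()) {
    std::cerr << "concat_dim and axis set simultaneously\n";
    return std::nullopt;
  }
  if (concat_dim.has_value()) {
    if (*concat_dim < 0) {
      std::cerr << "concat_dim is smaller than 0: " << *concat_dim << "\n";
      return std::nullopt;
    }
    return concat_dim;
  }
  if (axis.has_value()) {
    return axis;
  }
  return kConcatDefaultAxis;  // neither field set: fall back to the default
}

int main() {
  auto resolved = ResolveConcatAxis(std::nullopt, std::nullopt);
  std::cout << resolved.value_or(-1) << "\n";  // prints 1
  return 0;
}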

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeConcatParser : public CaffeNodeParser {
public:
CaffeConcatParser() : CaffeNodeParser("concat") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_

@ -26,7 +26,8 @@ static const int CAFFE_CONV_BIAS_DIM_NUM = 1;
namespace mindspore {
namespace lite {
STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *pad) {
STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *pad) {
/**
* padUp = padH;
* padDown = padH;
@ -35,7 +36,7 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
*/
if (convParam.has_pad_h() || convParam.has_pad_w()) {
if (convParam.pad_size() != 0) {
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both";
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both.";
return RET_ERROR;
}
@ -73,7 +74,8 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
return RET_OK;
}
STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *stride) {
STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *stride) {
if (convParam.has_stride_h() || convParam.has_stride_w()) {
if (convParam.stride_size() != 0) {
MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both";
@ -117,7 +119,8 @@ STATUS CaffeConvBaseParser::ParseDilations(const caffe::ConvolutionParameter &co
return RET_OK;
}
STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *kernel) {
STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *kernel) {
if (convParam.has_kernel_h() || convParam.has_kernel_w()) {
if (convParam.kernel_size_size() != 0) {
MS_LOG(ERROR) << "Either kernel_size or kernel_h/w should be specified; not both.";
@ -146,7 +149,8 @@ STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &conv
return RET_OK;
}
int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType) {
int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam,
const std::string &layerType) {
// group default 1
int group = 0;
if (convParam.has_group()) {

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#include <string>
#include <vector>
@ -30,22 +30,28 @@ class CaffeConvBaseParser {
virtual ~CaffeConvBaseParser() {}
STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *pad);
STATUS ParsePads(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *pad);
STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *stride);
STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *stride);
STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *dilation);
STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *dilation);
STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *kernel);
STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *kernel);
int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType);
int ParseGroup(const caffe::ConvolutionParameter &convParam,
const std::string &layerType);
int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut);
STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec);
STATUS ParseWeight(const caffe::LayerParameter &weight,
std::vector<schema::TensorT *> *weightVec);
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#include <string>
#include <memory>
@ -32,5 +32,5 @@ class CaffeConverter : public Converter {
};
} // namespace mindspore::lite
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_

@ -14,21 +14,23 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h"
#include "utils/log_adapter.h"
#include <memory>
namespace mindspore {
namespace lite {
void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr) {
if (attr == nullptr || attr->group == 1) {
return;
STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op,
schema::Conv2DT *attr) {
if (attr->group == 1) {
return RET_OK;
}
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam
= std::make_unique<schema::DepthwiseConv2DT>();
if (depthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DepthwiseConv2DT failed";
return;
MS_LOG(ERROR) << "new op failed";
return RET_ERROR;
}
depthwiseConv2DParam->format = attr->format;
depthwiseConv2DParam->channelIn = attr->channelIn;
depthwiseConv2DParam->channelMultiplier = attr->channelOut / attr->channelIn;
@ -48,19 +50,30 @@ void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::C
delete attr;
op->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
op->primitive->value.value = depthwiseConv2DParam.release();
return RET_OK;
}
STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());
if (attr == nullptr) {
MS_LOG(ERROR) << "new Conv2DT failed";
return RET_ERROR;
STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeConvolutionParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());
attr->format = schema::Format_NCHW;
const caffe::ConvolutionParameter convParam = proto.convolution_param();
const caffe::ConvolutionParameter convParam = proto.convolution_param();
CaffeConvBaseParser convParser;
// parse pad
std::vector<int64_t> pad(4, 0);
@ -119,14 +132,21 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
attr->channelIn = weightBlob.channels() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Conv2D;
op->primitive->value.value = attr.get();
ParseGroupConvolution(op, attr.release());
status = ParseGroupConvolution(op, attr.release());
if (status != RET_OK) {
MS_LOG(ERROR) << "Parse group convolution failed";
return RET_ERROR;
}
status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
return status;
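
Two related changes meet in the convolution hunk above: ParseGroupConvolution now returns a STATUS instead of void so Parse can stop on failure, and a grouped convolution is rewritten as a depthwise convolution whose channelMultiplier is channelOut / channelIn. A simplified sketch of both, using stand-in types rather than the real schema structs:

// Sketch of the void-to-STATUS refactor and the group-to-depthwise rewrite.
#include <iostream>
#include <memory>

enum STATUS { RET_OK = 0, RET_ERROR = 1 };

struct Conv2DT { int group = 1; int channelIn = 0; int channelOut = 0; };
struct DepthwiseConv2DT { int channelIn = 0; int channelMultiplier = 0; };

// Previously a void helper that bailed out silently; it now reports a STATUS.
STATUS RewriteGroupConvolution(const Conv2DT *attr, std::unique_ptr<DepthwiseConv2DT> *out) {
  if (attr->group == 1) {
    return RET_OK;  // ordinary convolution: nothing to rewrite
  }
  auto depthwise = std::make_unique<DepthwiseConv2DT>();
  if (depthwise == nullptr) {
    std::cerr << "new op failed\n";
    return RET_ERROR;
  }
  depthwise->channelIn = attr->channelIn;
  depthwise->channelMultiplier = attr->channelOut / attr->channelIn;
  *out = std::move(depthwise);
  return RET_OK;
}

int main() {
  Conv2DT conv{/*group=*/2, /*channelIn=*/8, /*channelOut=*/16};
  std::unique_ptr<DepthwiseConv2DT> depthwise;
  STATUS status = RewriteGroupConvolution(&conv, &depthwise);
  if (status != RET_OK) {                // the caller now propagates the error
    std::cerr << "Parse group convolution failed\n";
    return 1;
  }
  if (depthwise != nullptr) {
    std::cout << "channelMultiplier = " << depthwise->channelMultiplier << "\n";  // 2
  }
  return 0;
}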

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -28,14 +28,17 @@ class CaffeConvolutionParser : public CaffeNodeParser {
public:
CaffeConvolutionParser() : CaffeNodeParser("convolution") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
private:
void ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr);
STATUS ParseGroupConvolution(schema::CNodeT *op,
schema::Conv2DT *attr);
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_

@ -14,8 +14,8 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h"
#include <memory>
const int32_t CROP_AXIS = 2;
@ -25,7 +25,23 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeCropParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::CropT> attr = std::make_unique<schema::CropT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
if (!proto.has_crop_param()) {
attr->axis = CROP_AXIS;
std::vector<int64_t> offsets(2, 0);
@ -34,7 +50,7 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::CropParameter cropParam = proto.crop_param();
if (cropParam.has_axis()) {
if (cropParam.axis() == -1) {
// MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims.");
MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
}
attr->axis = cropParam.axis();
} else {
@ -49,9 +65,10 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
attr->offsets = offsets;
}
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Crop;
op->primitive->value.value = attr.release();
return RET_OK;
}

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeCropParser : public CaffeNodeParser {
public:
CaffeCropParser() : CaffeNodeParser("crop") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_

@ -14,21 +14,22 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h"
#include <memory>
namespace mindspore {
namespace lite {
void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr) {
if (attr == nullptr || attr->group == 1) {
return;
STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op,
schema::DeConv2DT *attr) {
if (attr->group == 1) {
return RET_OK;
}
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam
= std::make_unique<schema::DeDepthwiseConv2DT>();
if (deDepthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return;
MS_LOG(ERROR) << "new op failed";
return RET_ERROR;
}
deDepthwiseConv2DParam->format = attr->format;
deDepthwiseConv2DParam->channelIn = attr->channelOut;
@ -49,14 +50,30 @@ void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schem
delete attr;
op->primitive->value.type = schema::PrimitiveType_DeDepthwiseConv2D;
op->primitive->value.value = deDepthwiseConv2DParam.release();
return RET_OK;
}
STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
auto *attr = new schema::DeConv2DT();
STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeDeconvolutionParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::DeConv2DT> attr(new (std::nothrow) schema::DeConv2DT());
attr->format = schema::Format_NCHW;
const caffe::ConvolutionParameter convParam = proto.convolution_param();
const caffe::ConvolutionParameter convParam = proto.convolution_param();
CaffeConvBaseParser convParser;
// parse pad
std::vector<int64_t> pad(4, 0);
@ -118,13 +135,21 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
attr->channelIn = weightBlob.num() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_DeConv2D;
op->primitive->value.value = attr;
ParseGroupDeconvolution(op, attr);
op->primitive->value.value = attr.get();
status = ParseGroupDeconvolution(op, attr.release());
if (status != RET_OK) {
MS_LOG(ERROR) << "Parse group deconvolution failed";
return RET_ERROR;
}
status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
return status;
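
The deconvolution hunk above also swaps the raw new schema::DeConv2DT() for a std::unique_ptr built with new (std::nothrow), releasing ownership into the primitive once the node is wired up. A minimal illustration of that ownership pattern, again with simplified stand-in types:

// Sketch of the nothrow-new plus unique_ptr ownership handoff shown above.
#include <iostream>
#include <memory>
#include <new>

struct DeConv2DT { int group = 1; };
struct PrimitiveT { DeConv2DT *value = nullptr; ~PrimitiveT() { delete value; } };

bool ParseDeconvolutionLike(PrimitiveT *prim) {
  // new (std::nothrow) returns nullptr instead of throwing on allocation
  // failure, which is why the surrounding code checks the pointer.
  std::unique_ptr<DeConv2DT> attr(new (std::nothrow) DeConv2DT());
  if (attr == nullptr) {
    std::cerr << "new op failed\n";
    return false;
  }
  attr->group = 2;
  prim->value = attr.release();  // ownership moves to the primitive after wiring succeeds
  return true;
}

int main() {
  PrimitiveT prim;
  return ParseDeconvolutionLike(&prim) ? 0 : 1;
}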

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -28,14 +28,17 @@ class CaffeDeconvolutionParser : public CaffeNodeParser {
public:
CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
private:
void ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr);
STATUS ParseGroupDeconvolution(schema::CNodeT *op,
schema::DeConv2DT *attr);
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_

@ -14,19 +14,36 @@
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h"
#include <cmath>
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h"
#include "utils/log_adapter.h"
const int ELTWISE_MIN_INPUT_SIZE = 2;
const float ELTWISE_SUM_COEFF_EPSILON = 1e-5;
namespace mindspore {
namespace lite {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeEltwiseParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
if (proto.bottom_size() < ELTWISE_MIN_INPUT_SIZE) {
MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is "
<< proto.bottom_size();
@ -37,7 +54,7 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) {
MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size()
<< ") check fail, Eltwise Layer takes one coefficient per bottom blob.";
return RET_PARAM_INVALID;
return RET_ERROR;
}
if (eltwiseParam.operation() == caffe::EltwiseParameter::PROD && eltwiseParam.coeff_size() != 0) {
@ -64,12 +81,13 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
break;
default:
MS_LOG(ERROR) << "Eltwise parse params fail, unsupported opration: " << eltwiseParam.operation();
return RET_PARAM_INVALID;
return RET_ERROR;
}
} else {
attr->mode = schema::EltwiseMode_SUM;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Eltwise;
op->primitive->value.value = attr.release();
return RET_OK;

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeEltwiseParser : public CaffeNodeParser {
public:
CaffeEltwiseParser() : CaffeNodeParser("eltwise") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_

@ -13,20 +13,34 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h"
#include <memory>
namespace mindspore {
namespace lite {
STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeFlattenParser";
if (op == nullptr) {
// MS_LOG(ERROR) << "null pointer dereferencing.";
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::FlattenT> attr = std::make_unique<schema::FlattenT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Flatten;
op->primitive->value.value = attr.release();
return RET_OK;

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#define PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,10 +27,12 @@ class CaffeFlattenParser : public CaffeNodeParser {
public:
CaffeFlattenParser() : CaffeNodeParser("flatten") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_

@ -14,18 +14,35 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h"
#include <memory>
namespace mindspore {
namespace lite {
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeInnerProductParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
if (!innerProductParam.has_num_output()) {
// MS_LOGE("InnerProduct Parse num_output for %s failed.", proto.name().c_str());
MS_LOG(ERROR) << "InnerProduct Parse num_output for " << proto.name().c_str() << " failed.";
return RET_ERROR;
}
@ -33,7 +50,7 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const
attr->axis = 1;
attr->useAxis = true;
} else {
// MS_LOG(ERROR) << "InnerProduct Parse axis only support default 1, but actually " << innerProductParam.axis();
MS_LOG(ERROR) << "InnerProduct Parse axis only support default 1, but actually " << innerProductParam.axis();
return RET_ERROR;
}
@ -44,14 +61,14 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const
// parse weight
if (weight.blobs_size() == 0) {
// MS_LOGE("InnerProduct No filter data in layer %s", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct No filter data in layer " << weight.name().c_str();
return RET_ERROR;
}
// parse filter
auto filter = ConvertWeight(weight.blobs(0));
if (filter == nullptr) {
// MS_LOGE("InnerProduct parse weight for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct parse weight for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
weightVec->push_back(filter);
@ -60,14 +77,15 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const
if (innerProductParam.bias_term() && weight.blobs_size() > 1) {
auto bias = ConvertWeight(weight.blobs(1));
if (bias == nullptr) {
// MS_LOGE("InnerProduct parse bias for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct parse bias for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
weightVec->push_back(bias);
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_FullConnection;
op->primitive->value.value = attr.release();
return RET_OK;
}

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeInnerProductParser : public CaffeNodeParser {
public:
CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_

@ -32,6 +32,7 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
SetTopsAndBottoms();
FindInputAndOutput();
return RET_OK;
}

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#include <set>
#include <string>
@ -52,5 +52,5 @@ using CaffeInspectorPtr = std::shared_ptr<CaffeInspector>;
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_

@ -14,19 +14,37 @@
* limitations under the License.
*/
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h"
#include <memory>
namespace mindspore {
namespace lite {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeInterpParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();
if (height < 0) {
// MS_LOGE("Interp height must be > 0");
MS_LOG(ERROR) << "Interp height must be > 0";
return RET_ERROR;
}
attr->newHeight = height;
@ -35,17 +53,15 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe:
if (interpParam.has_width()) {
int64_t width = interpParam.width();
if (width < 0) {
// MS_LOGE("Interp width must be > 0");
MS_LOG(ERROR) << "Interp width must be > 0";
return RET_ERROR;
}
attr->newWidth = width;
}
attr->alignCorners = true;
attr->method = schema::ResizeMethod_BILINEAR;
op->name = proto.name();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Resize;
op->primitive->value.value = attr.release();
return RET_OK;

@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
@ -27,11 +27,13 @@ class CaffeInterpParser : public CaffeNodeParser {
public:
CaffeInterpParser() : CaffeNodeParser("Interp") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
