support ONNX Loop model ssd-mobilenet

pull/11673/head
zhengjun10 4 years ago
parent 3708624a25
commit 0e3671a8a0

@@ -0,0 +1,34 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/primitive_c.h"
#include "src/ops/populate/populate_register.h"
namespace mindspore {
namespace lite {
OpParameter *PopulateNonZeroParameter(const mindspore::lite::PrimitiveC *primitive) {
auto nonzero_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
if (nonzero_parameter == nullptr) {
MS_LOG(ERROR) << "malloc Where parameter failed.";
return nullptr;
}
memset(nonzero_parameter, 0, sizeof(OpParameter));
nonzero_parameter->type_ = primitive->Type();
return nonzero_parameter;
}
Registry NonZeroParameterRegistry(schema::PrimitiveType_NonZero, PopulateNonZeroParameter);
} // namespace lite
} // namespace mindspore

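A note on the new NonZero populate function above: NonZero returns the indices of the non-zero elements of its input, so its output shape depends on the input data and is only known at run time. That is why nothing beyond the base OpParameter (the type tag) needs to be populated here. A minimal standalone sketch of the op's semantics (illustration only, not repository code):

    #include <cstdio>
    int main() {
      // For input [[0, 1], [1, 0]] the non-zero coordinates are (0,1) and (1,0);
      // ONNX NonZero reports them as one output row per input dimension.
      const int input[2][2] = {{0, 1}, {1, 0}};
      for (int r = 0; r < 2; ++r) {
        for (int c = 0; c < 2; ++c) {
          if (input[r][c] != 0) printf("(%d,%d)\n", r, c);
        }
      }
      return 0;
    }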
@@ -30,6 +30,20 @@ namespace mindspore::kernel {
int TensorListSetItemCPUKernel::Init() { return RET_OK; }
int TensorListSetItemCPUKernel::IncrementOutputSize(int origin_size) {
output0_ = reinterpret_cast<lite::TensorList *>(out_tensors_[0]);
int new_tensors_size = origin_size + 1;
output0_->set_shape({new_tensors_size});
std::vector<std::vector<int>> out_shape;
out_shape.resize(new_tensors_size, in_tensors_[2]->shape());
auto ret = output0_->MallocTensorListData(in_tensors_[2]->data_type(), out_shape);
if (ret != RET_OK) {
MS_LOG(ERROR) << "increment output size malloc tensorlist data error";
return ret;
}
return RET_OK;
}
int TensorListSetItemCPUKernel::Run() {
input0_ = reinterpret_cast<lite::TensorList *>(in_tensors_[0]);
if (dtype_ != kTypeUnknown && dtype_ != input0_->tensors_data_type()) {
@@ -47,8 +61,10 @@ int TensorListSetItemCPUKernel::Run() {
}
index_ = reinterpret_cast<int *>(in_tensors_[1]->data_c())[0];
if (index_ < 0 || index_ > dim0) {
MS_LOG(ERROR) << "index tensor:[" << index_ << "] must be in [0, " << dim0 << "]!";
return RET_ERROR;
if (IncrementOutputSize(output0_->shape()[0]) != RET_OK) {
MS_LOG(ERROR) << "Resizeoutput Error ,index tensor:[" << index_ << "] must be in [0, " << dim0 << "]!";
return RET_ERROR;
}
}
input2_ = in_tensors_[2];
MS_ASSERT(input2_ != nullptr);
@@ -57,6 +73,13 @@ int TensorListSetItemCPUKernel::Run() {
}
output0_ = reinterpret_cast<lite::TensorList *>(out_tensors_[0]);
MS_ASSERT(output0_ != nullptr);
// new loop count
if (output0_->tensors().empty() && input0_->tensors().empty()) {
if (IncrementOutputSize(0) != RET_OK) {
MS_LOG(ERROR) << "Resizeoutput Error!";
return RET_ERROR;
}
}
// copy each tensor in tensors_
for (int i = 0; i < output0_->ElementsNum(); ++i) {
if (i == index_) {
@@ -92,10 +115,6 @@ int TensorListSetItemCPUKernel::Run() {
}
if (src->data_type() != kTypeUnknown) {
if (src->Size() != dst->Size()) {
MS_LOG(ERROR) << "src->Size():" << src->Size() << " must be equal to dst->Size():" << dst->Size();
return RET_ERROR;
}
auto ret = lite::Tensor::CopyTensorData(*src, dst);
if (ret != RET_OK) {
MS_LOG(ERROR) << "CopyTensorData[" << i << "] is failed!";

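The new IncrementOutputSize path above is what lets an ONNX Loop body append to a TensorList: when the write index lands past the current length (the Loop append case, index == size, or both lists still empty at the first iteration), the output list is grown by one slot instead of failing the old bounds check. A hedged std::vector analogue of that behavior (assumed semantics, not the kernel itself):

    #include <vector>
    // list stands in for the output lite::TensorList; each element is one tensor's data.
    static std::vector<std::vector<float>> list;
    void SetItem(int index, const std::vector<float> &tensor) {
      if (index == static_cast<int>(list.size())) {
        list.resize(list.size() + 1);  // grow by one slot, as IncrementOutputSize does
      }
      list[index] = tensor;  // copy the element tensor into its slot
    }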
@@ -36,6 +36,7 @@ class TensorListSetItemCPUKernel : public LiteKernel {
int Init() override;
int ReSize() override;
int Run() override;
int IncrementOutputSize(int origin_size);
private:
lite::TensorList *input0_ = nullptr;

@@ -79,6 +79,33 @@ TransposeCPUKernel::~TransposeCPUKernel() {
}
}
int TransposeCPUKernel::NhNcTranspose(lite::Tensor *in_tensor, lite::Tensor *out_tensor, TransposeParameter *param) {
auto out_shape = out_tensor->shape();
if (in_tensor->shape().size() == 4 && param->perm_[0] == 0 && param->perm_[1] == 2 && param->perm_[2] == 3 &&
param->perm_[3] == 1) {
if (in_tensor->data_type() == kNumberTypeFloat32) {
PackNCHWToNHWCFp32(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[1] * out_shape[2],
out_shape[3]);
} else if (in_tensor->data_type() == kNumberTypeInt8) {
PackNCHWToNHWCInt8(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[1] * out_shape[2],
out_shape[3]);
}
return RET_OK;
}
if (in_tensor->shape().size() == 4 && param->perm_[0] == 0 && param->perm_[1] == 3 && param->perm_[2] == 1 &&
param->perm_[3] == 2) {
if (in_tensor->data_type() == kNumberTypeFloat32) {
PackNHWCToNCHWFp32(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[2] * out_shape[3],
out_shape[1]);
} else if (in_tensor->data_type() == kNumberTypeInt8) {
PackNHWCToNCHWInt8(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[2] * out_shape[3],
out_shape[1]);
}
return RET_OK;
}
return RET_ERROR;
}
int TransposeCPUKernel::Run() {
MS_ASSERT(in_tensors_.size() == 1 || in_tensors_.size() == 2);
MS_ASSERT(out_tensors_.size() == 1);
@@ -110,28 +137,9 @@ int TransposeCPUKernel::Run() {
memcpy(out_data_, in_data_, in_tensor->ElementsNum() * sizeof(float));
return RET_OK;
}
auto out_shape = out_tensor->shape();
if (in_tensor->shape().size() == 4 && param->perm_[0] == 0 && param->perm_[1] == 2 && param->perm_[2] == 3 &&
param->perm_[3] == 1) {
if (in_tensor->data_type() == kNumberTypeFloat32) {
PackNCHWToNHWCFp32(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[1] * out_shape[2],
out_shape[3]);
} else if (in_tensor->data_type() == kNumberTypeInt8) {
PackNCHWToNHWCInt8(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[1] * out_shape[2],
out_shape[3]);
}
return RET_OK;
}
if (in_tensor->shape().size() == 4 && param->perm_[0] == 0 && param->perm_[1] == 3 && param->perm_[2] == 1 &&
param->perm_[3] == 2) {
if (in_tensor->data_type() == kNumberTypeFloat32) {
PackNHWCToNCHWFp32(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[2] * out_shape[3],
out_shape[1]);
} else if (in_tensor->data_type() == kNumberTypeInt8) {
PackNHWCToNCHWInt8(in_tensor->MutableData(), out_tensor->MutableData(), out_shape[0], out_shape[2] * out_shape[3],
out_shape[1]);
}
return RET_OK;
auto ret = NhNcTranspose(in_tensor, out_tensor, param);
if (ret == RET_OK) {
return ret;
}
if (in_tensor->data_type() == kNumberTypeInt8) {
MS_LOG(ERROR) << "not support now";
@@ -155,7 +163,7 @@ int TransposeCPUKernel::Run() {
}
MS_ASSERT(out_shape_);
auto ret = DoTransposeFp32(in_data_, out_data_, out_shape_, param, dim_size_, position_);
ret = DoTransposeFp32(in_data_, out_data_, out_shape_, param, dim_size_, position_);
if (dims > MAX_TRANSPOSE_DIM_SIZE) {
context_->allocator->Free(dim_size_);
context_->allocator->Free(position_);
@@ -171,6 +179,7 @@ int TransposeCPUKernel::Run() {
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, LiteKernelCreator<TransposeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Nchw2Nhwc, LiteKernelCreator<TransposeCPUKernel>)

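NhNcTranspose factors out the two fast paths that Run previously inlined: a 4-D transpose with perm {0, 2, 3, 1} is exactly NCHW-to-NHWC repacking, and perm {0, 3, 1, 2} is the NHWC-to-NCHW inverse, so both can bypass the generic DoTransposeFp32. A self-contained sketch of the first repacking (illustrative only; the repo uses PackNCHWToNHWCFp32):

    // Copy src laid out as NCHW into dst laid out as NHWC.
    void NchwToNhwc(const float *src, float *dst, int n, int c, int h, int w) {
      for (int ni = 0; ni < n; ++ni) {
        for (int hi = 0; hi < h; ++hi) {
          for (int wi = 0; wi < w; ++wi) {
            for (int ci = 0; ci < c; ++ci) {
              dst[((ni * h + hi) * w + wi) * c + ci] = src[((ni * c + ci) * h + hi) * w + wi];
            }
          }
        }
      }
    }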
@@ -35,6 +35,7 @@ class TransposeCPUKernel : public LiteKernel {
int Init() override;
int ReSize() override;
int Run() override;
int NhNcTranspose(lite::Tensor *in_tensor, lite::Tensor *out_tensor, TransposeParameter *param);
protected:
float *in_data_ = nullptr;

@@ -35,4 +35,4 @@ ml_video_edit_img_segment_adaptise.pb;2
ml_video_edit_img_segment_adaptise_pb2tflite.tflite;2
ml_video_edit_video_segment_gauss_adaptis_part2.pb;2
ml_video_edit_video_segment_gauss_adaptis_part2_pb2tflite.tflite;2
# tiny-yolov3-11.onnx;2;1,416,416,3:1,2
tiny-yolov3-11.onnx;2;1,416,416,3:1,2
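# entry format: <model file>;<input count>[;<input shapes, ':' separated>] (inferred from the entries above)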

@@ -44,6 +44,44 @@ STATUS FormatTransPass::Run(schema::MetaGraphT *graph) {
return RET_OK;
}
STATUS FormatTransPass::GetInsertFormatTrans(const schema::CNodeT &node, FormatTransNodeType *beforeNodeType,
FormatTransNodeType *afterNodeType) {
if (fmkType == converter::FmkType_TFLITE) { // inference by nhwc
return RET_NO_CHANGE;
} else if (fmkType == converter::FmkType_CAFFE || fmkType == converter::FmkType_MS ||
fmkType == converter::FmkType_ONNX) { // inference by nchw; these three fmks share identical handling
if (!IsContain(GetNhwcOpList(), GetCNodeTType(node))) {
return RET_NO_CHANGE;
}
*beforeNodeType = kNCHW2NHWC;
*afterNodeType = kNHWC2NCHW;
return RET_OK;
} else if (fmkType == converter::FmkType_TF) {
if (IsContain(GetNhwcOpList(), GetCNodeTType(node)) && GetFormat(node) == schema::Format_NCHW) {
*beforeNodeType = kNCHW2NHWC;
*afterNodeType = kNHWC2NCHW;
return RET_OK;
}
return RET_NO_CHANGE;
} else {
MS_LOG(ERROR) << "Unsupported fmk: " << fmkType;
return RET_ERROR;
}
}
STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) {
if (fmkType == converter::FmkType_TF || fmkType == converter::FmkType_TFLITE) {
return RET_OK;
@@ -53,6 +91,14 @@ STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) {
if (graph->nodes.empty()) {
return RET_OK;
}
// An ONNX graph input may already be NHWC: a 4-D input with a dynamic dim1 and a fixed dim3 needs no transpose
if (fmkType == converter::FmkType_ONNX && graph->inputIndex.size() == 1) {
auto &input_tensor = graph->allTensors.at(graph->inputIndex[0]);
auto &input_dims = input_tensor->dims;
if (input_dims.size() == 4 && input_dims[3] != -1 && input_dims[1] == -1) {
return RET_OK;
}
}
auto graphInputIdxes = graph->inputIndex;
for (size_t i = 0; i < graphInputIdxes.size(); i++) {
bool transed = false;
@@ -100,38 +146,15 @@ STATUS FormatTransPass::DoNodeInoutFormatTrans(schema::MetaGraphT *graph) {
MS_ASSERT(graph != nullptr);
// insert before and after the op cal by nchw/nc4hw4
for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
FormatTransNodeType beforeNodeType, afterNodeType;
if (fmkType == converter::FmkType_TFLITE) { // inference by nhwc
FormatTransNodeType beforeNodeType = kNCHW2NHWC;
FormatTransNodeType afterNodeType = kNHWC2NCHW;
STATUS status = RET_OK;
status = GetInsertFormatTrans(**iter, &beforeNodeType, &afterNodeType);
if (status == RET_NO_CHANGE) {
continue;
} else if (fmkType == converter::FmkType_CAFFE) { // inference by nchw
if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
continue;
}
beforeNodeType = kNCHW2NHWC;
afterNodeType = kNHWC2NCHW;
} else if (fmkType == converter::FmkType_MS) {
if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
continue;
}
beforeNodeType = kNCHW2NHWC;
afterNodeType = kNHWC2NCHW;
} else if (fmkType == converter::FmkType_ONNX) {
if (!IsContain(GetNhwcOpList(), GetCNodeTType(**iter))) {
continue;
}
beforeNodeType = kNCHW2NHWC;
afterNodeType = kNHWC2NCHW;
} else if (fmkType == converter::FmkType_TF) {
auto &node = *iter;
if (IsContain(GetNhwcOpList(), GetCNodeTType(**iter)) && GetFormat(node) == schema::Format_NCHW) {
beforeNodeType = kNCHW2NHWC;
afterNodeType = kNHWC2NCHW;
} else {
continue;
}
} else {
MS_LOG(ERROR) << "Unsupported fmk: " << fmkType;
return RET_ERROR;
}
if (status != RET_OK) {
return status;
}
auto &node = *iter;
auto nodeName = node->name;
@@ -150,7 +173,6 @@ STATUS FormatTransPass::DoNodeInoutFormatTrans(schema::MetaGraphT *graph) {
if (node->primitive->value.type == schema::PrimitiveType_DepthToSpace) {
reinterpret_cast<schema::DepthToSpaceT *>(attr)->format = schema::Format_NHWC;
}
STATUS status = RET_OK;
#ifdef SUPPORT_TRAIN
if (IsContain(GetNhwcAllInputOpList(), GetCNodeTType(**iter))) {
int idx_num = node->inputIndex.size();
@@ -250,18 +272,18 @@ void FormatTransPass::SetQuantType(QuantType quantType) { this->quantType = quan
void FormatTransPass::SetFmk(converter::FmkType fmkType) { this->fmkType = fmkType; }
int FormatTransPass::GetFormat(const std::unique_ptr<CNodeT> &node) {
switch (node->primitive->value.type) {
int FormatTransPass::GetFormat(const schema::CNodeT &node) {
switch (node.primitive->value.type) {
case schema::PrimitiveType_Conv2D:
return node->primitive->value.AsConv2D()->format;
return node.primitive->value.AsConv2D()->format;
case schema::PrimitiveType_DeConv2D:
return node->primitive->value.AsDeConv2D()->format;
return node.primitive->value.AsDeConv2D()->format;
case schema::PrimitiveType_DeDepthwiseConv2D:
return node->primitive->value.AsDeDepthwiseConv2D()->format;
return node.primitive->value.AsDeDepthwiseConv2D()->format;
case schema::PrimitiveType_DepthwiseConv2D:
return node->primitive->value.AsDepthwiseConv2D()->format;
return node.primitive->value.AsDepthwiseConv2D()->format;
case schema::PrimitiveType_Pooling:
return node->primitive->value.AsPooling()->format;
return node.primitive->value.AsPooling()->format;
default:
return schema::Format_NHWC;
}

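Two converter changes round this out: the per-framework branching in DoNodeInoutFormatTrans is centralized in the new GetInsertFormatTrans, and DoModelInputFormatTrans now skips inserting an input transpose when an ONNX graph (such as a TF-exported ssd-mobilenet) already feeds NHWC. A sketch of that heuristic as a standalone predicate (hypothetical helper name, assumed from the dims check above):

    #include <vector>
    // A 4-D input whose dim1 is dynamic (-1) while dim3 is fixed looks like
    // NHWC with a known channel count, e.g. {1, -1, -1, 3}.
    bool LooksLikeNhwcInput(const std::vector<int> &dims) {
      return dims.size() == 4 && dims[3] != -1 && dims[1] == -1;
    }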
@@ -47,7 +47,10 @@ class FormatTransPass : public GraphPass {
STATUS DoNodeInoutFormatTrans(schema::MetaGraphT *graph);
int GetFormat(const std::unique_ptr<CNodeT> &node);
int GetFormat(const schema::CNodeT &);
STATUS GetInsertFormatTrans(const schema::CNodeT &node, FormatTransNodeType *beforeNodeType,
FormatTransNodeType *afterNodeType);
protected:
size_t id = 0;
