!12112 add micro opcoders

From: @zoloft
Reviewed-by: 
Signed-off-by:
pull/12112/MERGE
mindspore-ci-bot (committed by Gitee)
commit 11b2d4c8aa

@@ -71,11 +71,28 @@ set(MICRO_CODER_SRC
${CMAKE_CURRENT_SOURCE_DIR}/opcoders/file_collector.cc
)
file(GLOB OPCODER_SRC_SERIALIZER RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
opcoders/serializers/nnacl_serializer/*.cc
)
file(GLOB_RECURSE OPCODER_SRC_FP32 RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
opcoders/nnacl/fp32/*.cc
)
file(GLOB_RECURSE OPCODER_SRC_INT8 RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
opcoders/nnacl/int8/*.cc
)
list(APPEND MICRO_CODER_SRC
${MICRO_ALLOCATOR}
${MICRO_GENERATOR}
${MICRO_OPCODERS_BASE}
${MICRO_OPCODERS_CMSIS_NN}
${OPCODER_SRC_SERIALIZER}
${OPCODER_SRC_FP32}
${OPCODER_SRC_INT8}
)
add_executable(codegen main.cc

@@ -0,0 +1,71 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h"
#include <string>
#include "nnacl/fp32/activation_fp32.h"
#include "nnacl/op_base.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Activation;
namespace mindspore::lite::micro {
int ActivationFP32Coder::DoCode(CoderContext *const context) {
// attribute
auto *activation_parameter = reinterpret_cast<ActivationParameter *>(parameter_);
int task_id = 0;
int length = input_tensor_->ElementsNum();
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
int stride = UP_DIV(length, thread_num_);
int count = MSMIN(stride, length - stride * task_id);
if (activation_parameter->type_ == schema::ActivationType_SIGMOID) {
Collect(context, {"runtime/kernel/fp32/sigmoid.h"}, {"sigmoid.c"});
} else {
Collect(context, {"nnacl/fp32/activation.h"}, {"activation.c"});
}
nnacl::NNaclFp32Serializer code;
switch (activation_parameter->type_) {
case schema::ActivationType_RELU:
code.CodeFunction("Fp32Relu", input_tensor_, count, output_tensor_);
break;
case schema::ActivationType_RELU6:
code.CodeFunction("Fp32Relu6", input_tensor_, count, output_tensor_);
break;
case schema::ActivationType_LEAKY_RELU:
code.CodeFunction("LRelu", input_tensor_, count, output_tensor_, activation_parameter->alpha_);
break;
case schema::ActivationType_SIGMOID:
code.CodeFunction("Sigmoid", input_tensor_, count, output_tensor_);
break;
case schema::ActivationType_TANH:
code.CodeFunction("Tanh", input_tensor_, count, output_tensor_);
break;
case schema::ActivationType_HSWISH:
code.CodeFunction("HSwish", input_tensor_, count, output_tensor_);
break;
default:
MS_LOG(ERROR) << "Activation type error";
return RET_ERROR;
}
MS_LOG(DEBUG) << "ActivationFP32Code has been called";
context->AppendCode(code.str());
return lite::RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Activation, CPUOpCoderCreator<ActivationFP32Coder>)
} // namespace mindspore::lite::micro
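
The stride/count arithmetic above follows the usual nnacl work-splitting pattern: UP_DIV rounds the element count up to a per-thread stride and MSMIN clamps the final chunk. DoCode pins task_id to 0, so the generated code handles the whole range in one call; the sketch below, with the two macros restated locally and hypothetical sizes, only illustrates how that partition behaves across several chunks.

#include <cstdio>

// Local restatements of the nnacl helpers used above (assumed semantics).
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))
#define MSMIN(a, b) ((a) < (b) ? (a) : (b))

int main() {
  const int length = 1001;   // stands in for input_tensor_->ElementsNum()
  const int thread_num = 4;  // stands in for thread_num_
  for (int task_id = 0; task_id < thread_num; ++task_id) {
    int stride = UP_DIV(length, thread_num);
    int count = MSMIN(stride, length - stride * task_id);
    // A chunk of `count` elements starting at task_id * stride would be handed
    // to the emitted kernel call (e.g. Fp32Relu) in a multi-threaded variant.
    std::printf("task %d: offset %d, count %d\n", task_id, task_id * stride, count);
  }
  return 0;
}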

@@ -0,0 +1,40 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
class ActivationFP32Coder final : public OperatorCoder {
public:
ActivationFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~ActivationFP32Coder() override = default;
int Prepare(CoderContext *const context) override { return RET_OK; }
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_

@@ -0,0 +1,48 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h"
#include <string>
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_AddN;
namespace mindspore::lite::micro {
int AddNFP32Coder::DoCode(CoderContext *const context) {
Tensor *input0 = input_tensors_.at(kInputIndex);
Tensor *input1 = input_tensors_.at(1);
int elements_num = input0->ElementsNum();
// Get Tensor Pointer
std::string input0_str = allocator_->GetRuntimeAddr(input0);
std::string input1_str = allocator_->GetRuntimeAddr(input1);
Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"});
nnacl::NNaclFp32Serializer code;
code.CodeFunction("ElementAdd", input0_str, input1_str, output_tensor_, elements_num);
if (input_tensors_.size() > 2) {
for (size_t i = 2; i < input_tensors_.size(); ++i) {
std::string input_str = allocator_->GetRuntimeAddr(input_tensors_.at(i));
code.CodeFunction("ElementAdd", input_str, output_tensor_, elements_num);
}
}
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AddN, CPUOpCoderCreator<AddNFP32Coder>)
} // namespace mindspore::lite::micro
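
For reference, the emitted AddN code reduces its inputs pairwise: one ElementAdd(in0, in1, out, n) call, then in-place accumulation of every remaining input into the output. Below is a small self-contained sketch of that evaluation order; element_add is only a stand-in mirroring the four-argument call shape used above, not the nnacl kernel that Collect() pulls in.

#include <vector>

// Stand-in with the same argument order as the generated ElementAdd calls.
static void element_add(const float *in0, const float *in1, float *out, int size) {
  for (int i = 0; i < size; ++i) out[i] = in0[i] + in1[i];
}

int main() {
  const int n = 8;
  std::vector<std::vector<float>> inputs(4, std::vector<float>(n, 1.0f));  // four AddN inputs
  std::vector<float> output(n, 0.0f);
  element_add(inputs[0].data(), inputs[1].data(), output.data(), n);  // out = in0 + in1
  for (size_t i = 2; i < inputs.size(); ++i) {
    element_add(inputs[i].data(), output.data(), output.data(), n);   // out += in_i
  }
  return 0;  // every output element is now 4.0f
}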

@@ -0,0 +1,36 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
class AddNFP32Coder : public OperatorCoder {
public:
AddNFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~AddNFP32Coder() override = default;
int Prepare(CoderContext *const context) override { return RET_OK; }
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_

@@ -0,0 +1,109 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_
#include <vector>
#include <string>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#define DEFAULT_ARITHMETIC_NDIMS 10
namespace mindspore::lite::micro {
using mindspore::schema::PrimitiveType_Add;
using mindspore::schema::PrimitiveType_Div;
using mindspore::schema::PrimitiveType_Equal;
using mindspore::schema::PrimitiveType_FloorDiv;
using mindspore::schema::PrimitiveType_FloorMod;
using mindspore::schema::PrimitiveType_Greater;
using mindspore::schema::PrimitiveType_GreaterEqual;
using mindspore::schema::PrimitiveType_Less;
using mindspore::schema::PrimitiveType_LessEqual;
using mindspore::schema::PrimitiveType_LogicalAnd;
using mindspore::schema::PrimitiveType_LogicalOr;
using mindspore::schema::PrimitiveType_Maximum;
using mindspore::schema::PrimitiveType_Minimum;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_NotEqual;
using mindspore::schema::PrimitiveType_RealDiv;
using mindspore::schema::PrimitiveType_SquaredDifference;
using mindspore::schema::PrimitiveType_Sub;
using mindspore::schema::PrimitiveType_Eltwise;
class ArithmeticFP32Coder final : public OperatorCoder {
public:
ArithmeticFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~ArithmeticFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
private:
int Init(CoderContext *const context);
int BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output, int dim,
int out_count, int out_thread_stride, nnacl::NNaclFp32Serializer *const code);
int break_pos_{0};
int outside_{0};
int out_thread_stride_{0};
int out_count_{0};
ArithmeticParameter *arithmetic_parameter_{nullptr};
Tensor *filter_tensor_{nullptr};
std::string arithmetic_run_;
std::string arithmetic_run_int_;
std::string arithmetic_opt_run_;
std::string arithmetic_opt_run_int_;
LiteDataType data_type_{kDataTypeFloat};
};
} // namespace mindspore::lite::micro
#endif // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_

@@ -0,0 +1,104 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h"
#include <string>
#include <map>
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
namespace mindspore::lite::micro {
int ArithmeticSelfFP32Coder::ReSize() {
data_size_ = input_tensor_->ElementsNum();
thread_sz_count_ = MSMIN(thread_num_, static_cast<int>(data_size_));
MS_CHECK_TRUE(thread_sz_count_ > 0, "thread_sz_count_ <= 0");
thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
return RET_OK;
}
int ArithmeticSelfFP32Coder::Prepare(CoderContext *const context) {
if (parameter_ == nullptr) {
return RET_ERROR;
}
std::map<int, std::function<void()>> type_setters = {
{PrimitiveType_Abs, [this]() { arithmetic_self_run_ = "ElementAbs"; }},
{PrimitiveType_Cos, [this]() { arithmetic_self_run_ = "ElementCos"; }},
{PrimitiveType_Log, [this]() { arithmetic_self_run_ = "ElementLog"; }},
{PrimitiveType_Square, [this]() { arithmetic_self_run_ = "ElementSquare"; }},
{PrimitiveType_Sqrt, [this]() { arithmetic_self_run_ = "ElementSqrt"; }},
{PrimitiveType_Rsqrt, [this]() { arithmetic_self_run_ = "ElementRsqrt"; }},
{PrimitiveType_Sin, [this]() { arithmetic_self_run_ = "ElementSin"; }},
{PrimitiveType_LogicalNot, [this]() { arithmetic_self_run_ = "ElementLogicalNot"; }},
{PrimitiveType_Floor, [this]() { arithmetic_self_run_ = "ElementFloor"; }},
{PrimitiveType_Ceil, [this]() { arithmetic_self_run_ = "ElementCeil"; }},
{PrimitiveType_Round, [this]() { arithmetic_self_run_ = "ElementRound"; }},
{PrimitiveType_Neg, [this]() { arithmetic_self_run_ = "ElementNegative"; }},
};
auto iter = type_setters.find(parameter_->type_);
if (iter != type_setters.end()) {
iter->second();
} else {
MS_LOG(ERROR) << "Error Operator type " << parameter_;
return RET_ERROR;
}
MS_CHECK_RET_CODE(ReSize(), "ReSize failed");
return RET_OK;
}
int ArithmeticSelfFP32Coder::DoCode(CoderContext *const context) {
int task_id = 0;
int size = MSMIN(thread_sz_stride_, static_cast<int>(data_size_ - task_id * thread_sz_stride_));
MS_CHECK_TRUE(!arithmetic_self_run_.empty(), "arithmetic_self_run_ function name is empty!");
Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic_self.h"}, {"nnacl/fp32/arithmetic_self.c"});
nnacl::NNaclFp32Serializer code;
code.CodeFunction(arithmetic_self_run_, input_tensor_, output_tensor_, size);
MS_LOG(DEBUG) << "ArithmeticSelfFP32Coder has been called";
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Abs, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Cos, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Log, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Square, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sqrt, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Rsqrt, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sin, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LogicalNot,
CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Floor, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Ceil, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Round, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Neg, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
} // namespace mindspore::lite::micro
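
Prepare() above only selects the name of the single-input nnacl kernel; DoCode() then emits exactly one call of the form ElementXxx(input, output, size). The compact sketch below reproduces the same table-driven selection, using placeholder enum values and kernel names rather than the schema primitive types.

#include <functional>
#include <iostream>
#include <map>
#include <string>

enum OpType { kAbs, kCos, kLog };  // placeholders for the schema PrimitiveType values

int main() {
  std::string kernel;
  const std::map<OpType, std::function<void()>> setters = {
      {kAbs, [&kernel]() { kernel = "ElementAbs"; }},
      {kCos, [&kernel]() { kernel = "ElementCos"; }},
      {kLog, [&kernel]() { kernel = "ElementLog"; }},
  };
  OpType type = kCos;
  auto it = setters.find(type);
  if (it == setters.end()) {
    std::cerr << "Unsupported operator type: " << type << std::endl;
    return 1;
  }
  it->second();
  std::cout << "would emit: " << kernel << "(input, output, size)" << std::endl;
  return 0;
}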

@@ -0,0 +1,109 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_
#include <string>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/arithmetic_self_fp32.h"
#include "nnacl/arithmetic_self_parameter.h"
namespace mindspore::lite::micro {
using mindspore::schema::PrimitiveType_Abs;
using mindspore::schema::PrimitiveType_Add;
using mindspore::schema::PrimitiveType_AddN;
using mindspore::schema::PrimitiveType_Neg;
using mindspore::schema::PrimitiveType_Ceil;
using mindspore::schema::PrimitiveType_Cos;
using mindspore::schema::PrimitiveType_Div;
using mindspore::schema::PrimitiveType_Equal;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_FloorDiv;
using mindspore::schema::PrimitiveType_FloorMod;
using mindspore::schema::PrimitiveType_Greater;
using mindspore::schema::PrimitiveType_GreaterEqual;
using mindspore::schema::PrimitiveType_Less;
using mindspore::schema::PrimitiveType_LessEqual;
using mindspore::schema::PrimitiveType_Log;
using mindspore::schema::PrimitiveType_LogicalAnd;
using mindspore::schema::PrimitiveType_LogicalOr;
using mindspore::schema::PrimitiveType_LogicalNot;
using mindspore::schema::PrimitiveType_Maximum;
using mindspore::schema::PrimitiveType_Minimum;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_NotEqual;
using mindspore::schema::PrimitiveType_RealDiv;
using mindspore::schema::PrimitiveType_Round;
using mindspore::schema::PrimitiveType_Rsqrt;
using mindspore::schema::PrimitiveType_Sqrt;
using mindspore::schema::PrimitiveType_SquaredDifference;
using mindspore::schema::PrimitiveType_Sub;
using mindspore::schema::PrimitiveType_Sin;
using mindspore::schema::PrimitiveType_Square;
class ArithmeticSelfFP32Coder final : public OperatorCoder {
public:
ArithmeticSelfFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
~ArithmeticSelfFP32Coder() override = default;
private:
int ReSize();
private:
int thread_sz_count_{0};
int thread_sz_stride_{0};
size_t data_size_{0};
std::string arithmetic_self_run_;
};
} // namespace mindspore::lite::micro
#endif // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_

@@ -0,0 +1,55 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h"
#include <string>
#include "schema/inner/ops_generated.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
namespace mindspore::lite::micro {
using mindspore::schema::PrimitiveType_AssignAdd;
int AssignAddFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int AssignAddFP32Coder::DoCode(CoderContext *const context) {
MS_CHECK_TRUE(input_tensors_.size() == 2, "inputs size is not equal to two");
Tensor *input0 = input_tensors_.at(0);
Tensor *input1 = input_tensors_.at(1);
if (input0->Size() != input1->Size()) {
MS_LOG(ERROR) << "input0 size: " << input0->Size() << ", input1 size: " << input1->Size();
return RET_ERROR;
}
nnacl::NNaclFp32Serializer code;
// Get Tensor Pointer
std::string input0_str = allocator_->GetRuntimeAddr(input0);
std::string input1_str = allocator_->GetRuntimeAddr(input1);
size_t data_size = input0->Size();
// assign add: just add input1's data to input0
code << "\t\tfor (int i = 0; i < " << data_size << "; ++i) {\n";
code << "\t\t\t(" << input0_str << ")[i] += (" << input1_str << ")[i];\n";
code << "\t\t}\n";
code.CodeFunction("memcpy", output_tensor_, input0_str, data_size);
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
} // namespace mindspore::lite::micro
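
To make the generated output concrete: the serializer above writes an element-wise in-place add followed by a memcpy into the output buffer. The emitted C looks roughly like the snippet below, where g_input0/g_input1/g_output and the count are hypothetical stand-ins for the addresses returned by allocator_->GetRuntimeAddr and the computed data size.

#include <cstring>

// Hypothetical buffers standing in for the runtime addresses in the generated file.
static float g_input0[16];
static float g_input1[16];
static float g_output[16];

int main() {
  const int count = 16;  // corresponds to the loop bound written by the coder
  for (int i = 0; i < count; ++i) {
    (g_input0)[i] += (g_input1)[i];
  }
  std::memcpy(g_output, g_input0, sizeof(g_output));
  return 0;
}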

@@ -0,0 +1,37 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
class AssignAddFP32Coder : public OperatorCoder {
public:
AssignAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~AssignAddFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_

@@ -0,0 +1,69 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h"
#include <string>
#include <vector>
#include "nnacl/fp32/batchnorm_fp32.h"
#include "src/ops/batch_norm.h"
#include "nnacl/op_base.h"
#include "micro/coder/opcoders/file_collector.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
using mindspore::schema::PrimitiveType_BatchNorm;
namespace mindspore::lite::micro {
int BatchnormFP32Coder::Init() {
auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_);
auto bn_prim = reinterpret_cast<const mindspore::lite::BatchNorm *>(OperatorCoder::primitive());
bn_parameter->epsilon_ = bn_prim->GetEpsilon();
std::vector<int> input_shapes = input_tensor_->shape();
if (input_shapes.empty()) {
return RET_ERROR;
}
int n_dim = static_cast<int>(input_shapes.size());
bn_parameter->channel_ = input_shapes.at(n_dim - 1);
bn_parameter->unit_ = 1;
for (int i = 0; i < n_dim - 1; i++) {
bn_parameter->unit_ *= input_shapes.at(i);
}
bn_parameter->op_parameter_.thread_num_ = MSMIN(bn_parameter->op_parameter_.thread_num_, bn_parameter->unit_);
return RET_OK;
}
int BatchnormFP32Coder::DoCode(CoderContext *const context) {
// attribute
int task_id = 0;
auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_);
if (Init() != RET_OK) {
MS_LOG(ERROR) << "BatchnormFP32Coder Init error";
return RET_ERROR;
}
MS_CHECK_TRUE(input_tensors_.size() == 3, "inputs size is not equal to three");
Tensor *mean_tensor = input_tensors_.at(1);
Tensor *var_tensor = input_tensors_.at(2);
Collect(context, {"nnacl/fp32/batchnorm.h"}, {"nnacl/fp32/batchnorm.c"});
nnacl::NNaclFp32Serializer code;
code.CodeStruct("bn_parameter", *bn_parameter);
code.CodeFunction("BatchNorm", output_tensor_, input_tensor_, mean_tensor, var_tensor, task_id, "&bn_parameter");
MS_LOG(INFO) << "BatchnormFP32Code has been called";
context->AppendCode(code.str());
return lite::RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_BatchNorm, CPUOpCoderCreator<BatchnormFP32Coder>)
} // namespace mindspore::lite::micro
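
Init() above folds every leading dimension into unit_ and keeps the last dimension as channel_, the usual NHWC batch-norm layout. A small worked sketch of that shape handling with an arbitrary shape:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> input_shape = {1, 16, 16, 32};  // e.g. NHWC
  int n_dim = static_cast<int>(input_shape.size());
  int channel = input_shape.at(n_dim - 1);  // 32
  int unit = 1;
  for (int i = 0; i < n_dim - 1; ++i) {
    unit *= input_shape.at(i);  // 1 * 16 * 16 = 256
  }
  std::printf("unit_ = %d, channel_ = %d\n", unit, channel);
  return 0;
}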

@@ -0,0 +1,43 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
class BatchnormFP32Coder final : public OperatorCoder {
public:
BatchnormFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~BatchnormFP32Coder() override = default;
int Prepare(CoderContext *const context) override { return RET_OK; }
int DoCode(CoderContext *const context) override;
private:
int Init();
};
} // namespace mindspore::lite::micro
#endif // MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_

@@ -0,0 +1,77 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h"
#include <string>
#include <vector>
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::lite::micro {
int ConcatFP32Coder::Prepare(CoderContext *const context) {
concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);
return ReSize();
}
int ConcatFP32Coder::ReSize() {
axis_ = concat_param_->axis_ >= 0 ? concat_param_->axis_
: static_cast<int>(input_tensor_->shape().size()) + concat_param_->axis_;
return RET_OK;
}
int ConcatFP32Coder::DoCode(CoderContext *const context) {
Collect(context, {"nnacl/fp32/concat.h"}, {"nnacl/fp32/concat.c"});
size_t input_num = input_tensors_.size();
nnacl::NNaclFp32Serializer code;
code << "\t\tvoid *inputs_addr[] = {";
for (size_t i = 0; i < input_num; ++i) {
code << allocator_->GetRuntimeAddr(input_tensors_.at(i)) << ", ";
}
code << "};\n";
size_t i;
for (i = 0; i < input_num; ++i) {
code << "\t\tint shape_" << i << "[] = {";
for (auto &shape : input_tensors_.at(i)->shape()) {
code << shape << ", ";
}
code << "};\n";
}
code << "\t\tint shape_" << i << "[] = {";
for (auto &shape : output_tensor_->shape()) {
code << shape << ", ";
}
code << "};\n";
code << "\t\tint *inputs_output_shape[] = {";
for (i = 0; i <= input_num; ++i) {
code << "shape_" << i << ", ";
}
code << "};\n";
code.CodeFunction("Concat", "inputs_addr", input_num, axis_, "inputs_output_shape", output_tensor_->shape().size(),
output_tensor_, 0, thread_num_);
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Concat, CPUOpCoderCreator<ConcatFP32Coder>)
} // namespace mindspore::lite::micro
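
The serializer above materialises the input addresses and every input/output shape as plain C arrays before calling the Concat kernel collected from nnacl/fp32/concat.c. For two 2-D inputs concatenated along axis 0, the generated code would look roughly like the sketch below; the buffer names and shapes are hypothetical, and Concat here is only a stub mirroring the argument order of the CodeFunction call so the sketch stands alone.

#include <cstddef>

// Stub mirroring the argument order used above; the real kernel lives in nnacl.
static void Concat(void **inputs, size_t input_num, int axis, int **shapes,
                   size_t out_rank, float *output, int task_id, int thread_num) {
  (void)inputs; (void)input_num; (void)axis; (void)shapes;
  (void)out_rank; (void)output; (void)task_id; (void)thread_num;
}

static float g_in0[2 * 3];
static float g_in1[2 * 3];
static float g_out[4 * 3];

int main() {
  void *inputs_addr[] = {g_in0, g_in1};
  int shape_0[] = {2, 3};
  int shape_1[] = {2, 3};
  int shape_2[] = {4, 3};  // output shape
  int *inputs_output_shape[] = {shape_0, shape_1, shape_2};
  Concat(inputs_addr, 2, /*axis=*/0, inputs_output_shape, /*out_rank=*/2, g_out,
         /*task_id=*/0, /*thread_num=*/1);
  return 0;
}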

@@ -0,0 +1,42 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/concat_parameter.h"
namespace mindspore::lite::micro {
class ConcatFP32Coder : public OperatorCoder {
public:
ConcatFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~ConcatFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
private:
int ReSize();
int axis_{0};
ConcatParameter *concat_param_{nullptr};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_

@@ -0,0 +1,52 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.h"
#include <string>
#include "micro/coder/opcoders/file_collector.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
using mindspore::schema::PrimitiveType_ExpandDims;
namespace mindspore::lite::micro {
int ExpandDimsFP32Coder::Prepare(CoderContext *const context) { return ReSize(); }
int ExpandDimsFP32Coder::ReSize() {
data_size_ = input_tensor_->ElementsNum();
thread_sz_count_ = MSMIN(thread_num_, static_cast<int>(data_size_));
MS_CHECK_TRUE(thread_sz_count_ > 0, "thread_sz_count_ is less or equal to 0");
thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
return RET_OK;
}
int ExpandDimsFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/fp32/expandDims.h"}, {"nnacl/fp32/expandDims.c"});
nnacl::NNaclFp32Serializer code;
int task_id = 0;
size_t size = MSMIN(thread_sz_stride_, static_cast<int>(data_size_ - task_id * thread_sz_stride_));
if (!size) {
return RET_OK;
}
code.CodeFunction("ExpandDims", input_tensor_, output_tensor_, size * sizeof(float));
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
} // namespace mindspore::lite::micro
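
ExpandDims never rearranges data, so the emitted kernel amounts to a byte copy of the selected element range. A minimal sketch with hypothetical buffers:

#include <cstring>

static float g_input[64];
static float g_output[64];

int main() {
  const size_t size = 64;  // MSMIN(thread_sz_stride_, remaining elements) in the coder
  // Equivalent of the generated ExpandDims(input, output, size * sizeof(float)) call.
  std::memcpy(g_output, g_input, size * sizeof(float));
  return 0;
}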

@@ -0,0 +1,42 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
class ExpandDimsFP32Coder : public OperatorCoder {
public:
ExpandDimsFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~ExpandDimsFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
private:
int ReSize();
int thread_sz_count_{0};
int thread_sz_stride_{0};
size_t data_size_{0};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_

@@ -0,0 +1,69 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h"
#include <string>
#include "nnacl/gather_parameter.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/log.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Gather;
namespace mindspore::lite::micro {
int GatherFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int GatherFP32Coder::DoCode(CoderContext *context) {
Tensor *input0 = input_tensors_.at(0);
Tensor *input1 = input_tensors_.at(1);
// generate code .h .c
Collect(context, {"nnacl/fp32/gather.h"}, {"nnacl/fp32/gather.c"});
nnacl::NNaclFp32Serializer code;
std::vector<int> in_shape = input0->shape();
int in_rank = in_shape.size();
int indices_element_size = input1->ElementsNum();
int axis = (reinterpret_cast<GatherParameter *>(parameter_))->axis_;
MS_CHECK_TRUE(static_cast<int>(in_shape.size()) > axis, "invalid axis in gather parameter");
const int limit = in_shape.at(axis);
int outer_size = 1, inner_size = 1;
for (int i = 0; i < axis; ++i) {
outer_size *= in_shape.at(i);
}
for (int i = axis + 1; i < in_rank; ++i) {
inner_size *= in_shape.at(i);
}
int task_id = 0;
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
int stride = UP_DIV(outer_size, thread_num_);
int count = MSMIN(stride, outer_size - stride * task_id);
// call the op function
if (input0->data_type() == kNumberTypeInt32) {
code.CodeFunction("GatherInt32", input0, count, inner_size, limit, input1, indices_element_size, output_tensor_);
} else {
code.CodeFunction("Gather", input0, count, inner_size, limit, input1, indices_element_size, output_tensor_);
}
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Gather, CPUOpCoderCreator<GatherFP32Coder>)
} // namespace mindspore::lite::micro
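
The loop nest above collapses the input shape around axis_: everything before the axis becomes outer_size, everything after it becomes inner_size, and limit is the extent of the axis that the gathered indices must stay below. A worked sketch with an arbitrary shape:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> in_shape = {2, 4, 5, 3};
  const int axis = 2;
  int in_rank = static_cast<int>(in_shape.size());
  int limit = in_shape.at(axis);  // 5: valid index values are 0..4
  int outer_size = 1, inner_size = 1;
  for (int i = 0; i < axis; ++i) outer_size *= in_shape.at(i);            // 2 * 4 = 8
  for (int i = axis + 1; i < in_rank; ++i) inner_size *= in_shape.at(i);  // 3
  std::printf("outer_size=%d inner_size=%d limit=%d\n", outer_size, inner_size, limit);
  return 0;
}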

@@ -0,0 +1,41 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
class GatherFP32Coder : public OperatorCoder {
public:
GatherFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~GatherFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
private:
int32_t *indices_{nullptr};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_

@@ -0,0 +1,51 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.h"
#include <vector>
#include <string>
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Nchw2Nhwc;
namespace mindspore::lite::micro {
int Nchw2NhwcFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
// generate code .h .c
Collect(context, {"nnacl/pack.h"}, {"nnacl/pack.c"});
nnacl::NNaclFp32Serializer code;
if (input_tensor_->shape().size() == 4) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNCHWToNHWCFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
} else if (input_tensor_->data_type() == kNumberTypeInt8) {
code.CodeFunction("PackNCHWToNHWCInt8", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
} else {
MS_LOG(ERROR) << "unsupported format transform";
}
} else {
code.CodeFunction("memcpy", output_tensor_, input_tensor_, input_tensor_->ElementsNum() * sizeof(float));
}
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, CPUOpCoderCreator<Nchw2NhwcFP32Coder>)
} // namespace mindspore::lite::micro
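
PackNCHWToNHWCFp32 is emitted with (batch, plane = H * W, channel) arguments; conceptually it transposes each batch's channel-by-plane block into plane-by-channel order. The loop below is an illustrative repack under that assumption, not the nnacl implementation that Collect() pulls in from nnacl/pack.c.

#include <vector>

// Illustrative NCHW -> NHWC repack: dst[b][p][c] = src[b][c][p], with p = h * W + w.
static void pack_nchw_to_nhwc(const float *src, float *dst, int batch, int plane, int channel) {
  for (int b = 0; b < batch; ++b) {
    const float *src_b = src + b * channel * plane;
    float *dst_b = dst + b * plane * channel;
    for (int c = 0; c < channel; ++c) {
      for (int p = 0; p < plane; ++p) {
        dst_b[p * channel + c] = src_b[c * plane + p];
      }
    }
  }
}

int main() {
  const int batch = 1, plane = 4, channel = 3;
  std::vector<float> src(batch * channel * plane), dst(batch * plane * channel);
  for (size_t i = 0; i < src.size(); ++i) src[i] = static_cast<float>(i);
  pack_nchw_to_nhwc(src.data(), dst.data(), batch, plane, channel);
  return 0;
}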

@@ -0,0 +1,38 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
#include <string>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
class Nchw2NhwcFP32Coder : public OperatorCoder {
public:
Nchw2NhwcFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~Nchw2NhwcFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_

@@ -0,0 +1,50 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.h"
#include <string>
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Nhwc2Nchw;
namespace mindspore::lite::micro {
int Nhwc2NchwFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/pack.h"}, {"pack.c"});
nnacl::NNaclFp32Serializer code;
if (input_tensor_->shape().size() == 4) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNHWCToNCHWFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
} else if (input_tensor_->data_type() == kNumberTypeInt8) {
code.CodeFunction("PackNHWCToNCHWInt8", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
} else {
MS_LOG(ERROR) << "unsupported format transform";
}
} else {
code.CodeFunction("memcpy", output_tensor_, input_tensor_, input_tensor_->ElementsNum() * sizeof(float));
}
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, CPUOpCoderCreator<Nhwc2NchwFP32Coder>)
} // namespace mindspore::lite::micro

@@ -0,0 +1,37 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
class Nhwc2NchwFP32Coder : public OperatorCoder {
public:
Nhwc2NchwFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~Nhwc2NchwFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_

@@ -0,0 +1,103 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h"
#include <string>
#include <vector>
#include "micro/coder/log.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Pad;
namespace mindspore::lite::micro {
int PadFP32Coder::Prepare(CoderContext *const context) {
pad_param_ = reinterpret_cast<PadParameter *>(parameter_);
return ReSize();
}
int PadFP32Coder::ReSize() {
size_t rank = input_tensor_->shape().size();
if (rank > DEFAULT_PAD_NDIMS) {
MS_LOG(ERROR) << "Pad input rank should <= " << DEFAULT_PAD_NDIMS << ", got " << rank;
return RET_ERROR;
}
if (pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {
MS_CHECK_RET_CODE(ExtendShape(in_, DEFAULT_PAD_NDIMS, input_tensor_->shape().data(), rank),
"ExtendShape input error");
MS_CHECK_RET_CODE(ExtendShape(out_, DEFAULT_PAD_NDIMS, output_tensor_->shape().data(), rank),
"ExtendShape output error");
if (pad_param_->padding_length < MAX_PAD_SIZE) {
int ori_paddings[MAX_PAD_SIZE];
for (int i = 0; i < pad_param_->padding_length; ++i) {
ori_paddings[i] = pad_param_->paddings_[i];
}
MS_CHECK_RET_CODE(ExtendPaddings(pad_param_->paddings_, MAX_PAD_SIZE, ori_paddings, pad_param_->padding_length),
"Extendpadding error");
pad_param_->padding_length = MAX_PAD_SIZE;
}
}
return RET_OK;
}
int PadFP32Coder::ExtendShape(int *shape, int length, const int *ori_shape, int rank) {
MS_CHECK_PTR(shape);
MS_CHECK_PTR(ori_shape);
for (int i = 0; i < length - rank; ++i) {
shape[i] = 1;
}
for (int i = length - rank; i < length; ++i) {
shape[i] = ori_shape[i - (length - rank)];
}
return RET_OK;
}
int PadFP32Coder::ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length) {
MS_CHECK_PTR(paddings);
MS_CHECK_PTR(ori_paddings);
for (int i = 0; i < length - ori_length; ++i) {
paddings[i] = 0;
}
for (int i = length - ori_length; i < length; ++i) {
paddings[i] = ori_paddings[i - (length - ori_length)];
}
return RET_OK;
}
int PadFP32Coder::DoCode(CoderContext *const context) {
int task_id = thread_num_ - 1;
Collect(context, {"nnacl/fp32/pad.h", "nnacl/pad_parameter.h"}, {"nnacl/fp32/pad.c"});
nnacl::NNaclFp32Serializer code;
code.CodeArray("in_", in_, DEFAULT_PAD_NDIMS);
code.CodeArray("out_", out_, DEFAULT_PAD_NDIMS);
code.CodeArray("padding_", pad_param_->paddings_, MAX_PAD_SIZE);
int output_size = output_tensor_->ElementsNum();
if (pad_param_->constant_value_ <= 1e-5f && pad_param_->constant_value_ >= -1e-5f) {
code.CodeFunction("memset", output_tensor_, "0", output_size * sizeof(float));
} else {
std::vector<float> constant_values(output_size, pad_param_->constant_value_);
code.CodeArray("output_tensor_", constant_values.data(), output_size);
}
code.CodeFunction("Pad", input_tensor_, output_tensor_, "in_", "out_", "padding_", task_id, thread_num_);
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pad, CPUOpCoderCreator<PadFP32Coder>)
} // namespace mindspore::lite::micro
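
ExtendShape and ExtendPaddings above left-pad the shape with 1s and the padding pairs with 0s so that every input is normalised to DEFAULT_PAD_NDIMS dimensions before the constant-mode kernel runs. The sketch below restates the same mechanics; kPadDims and kMaxPadSize are assumed stand-ins for the nnacl macros, and only the normalisation logic is meant to match.

#include <cstdio>

constexpr int kPadDims = 6;                // stand-in for DEFAULT_PAD_NDIMS (value assumed)
constexpr int kMaxPadSize = 2 * kPadDims;  // stand-in for MAX_PAD_SIZE (one pair per dimension)

static void extend_shape(int *shape, int length, const int *ori_shape, int rank) {
  for (int i = 0; i < length - rank; ++i) shape[i] = 1;
  for (int i = length - rank; i < length; ++i) shape[i] = ori_shape[i - (length - rank)];
}

static void extend_paddings(int *paddings, int length, const int *ori, int ori_length) {
  for (int i = 0; i < length - ori_length; ++i) paddings[i] = 0;
  for (int i = length - ori_length; i < length; ++i) paddings[i] = ori[i - (length - ori_length)];
}

int main() {
  const int ori_shape[2] = {4, 5};           // a rank-2 input
  const int ori_paddings[4] = {1, 1, 2, 2};  // one (before, after) pair per original dimension
  int shape[kPadDims] = {0};
  int paddings[kMaxPadSize] = {0};
  extend_shape(shape, kPadDims, ori_shape, 2);              // -> 1 1 1 1 4 5
  extend_paddings(paddings, kMaxPadSize, ori_paddings, 4);  // -> 0 ... 0 1 1 2 2
  for (int i = 0; i < kPadDims; ++i) std::printf("%d ", shape[i]);
  std::printf("\n");
  return 0;
}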

@@ -0,0 +1,49 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/pad_fp32.h"
namespace mindspore::lite::micro {
class PadFP32Coder : public OperatorCoder {
public:
PadFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
~PadFP32Coder() override = default;
int Prepare(CoderContext *const context) override;
int DoCode(CoderContext *const context) override;
int ReSize();
private:
int ExtendShape(int *shape, int length, const int *ori_shape, int rank);
int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);
protected:
PadParameter *pad_param_{nullptr};
int in_[DEFAULT_PAD_NDIMS]{0};
int out_[DEFAULT_PAD_NDIMS]{0};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_

Some files were not shown because too many files have changed in this diff.
