!14479 fix rpnt_pdr model

From: @zoloft
Reviewed-by: 
Signed-off-by:
pull/14479/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit ece2f27889

@ -232,7 +232,7 @@ void CodeFreeResourceImplement(std::ofstream &ofs, const std::unique_ptr<CoderCo
for (const auto &item : ctx->tensors_map()) {
Tensor *tensor = item.first;
std::string name = item.second;
if (tensor->data_c() != nullptr && tensor->category() != Tensor::Category::CONST_TENSOR) {
if (tensor->data_c() != nullptr && !(CheckConstantTensor(tensor))) {
ofs << name << ", ";
num++;
}

@ -43,7 +43,7 @@ void CodeModelParamsState(std::ofstream &ofs, const std::map<std::string, Tensor
for (auto &item : weights) {
std::string name = item.first;
Tensor *tensor = item.second;
if (tensor->category() == Tensor::Category::CONST_TENSOR) {
if (CheckConstantTensor(tensor)) {
if (tensor->data_c() == nullptr) {
continue;
}
@ -56,7 +56,7 @@ void CodeModelParamsData(std::ofstream &ofs, const std::map<std::string, Tensor
for (auto &item : weights) {
std::string name = item.first;
Tensor *tensor = item.second;
if (tensor->category() == Tensor::Category::CONST_TENSOR) {
if (CheckConstantTensor(tensor)) {
if (tensor->data_c() == nullptr) {
continue;
}
@ -78,7 +78,7 @@ void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std::
if (tensor->data_c() == nullptr) {
continue;
}
if (tensor->category() == Tensor::Category::CONST_TENSOR) {
if (CheckConstantTensor(tensor)) {
hofs << "extern " << GetTensorDataType(tensor->data_type()) << name << "[];\n";
cofs << GetTensorDataType(tensor->data_type()) << name << "[" << tensor->ElementsNum() << "];\n";
} else if (tensor->category() == Tensor::Category::VAR) {
@ -114,7 +114,7 @@ void CodeWeightInitFunc(std::ofstream &ofs, const std::unique_ptr<CoderContext>
for (const auto &item : ctx->saved_weights()) {
std::string name = item.first;
Tensor *tensor = item.second;
if (tensor->category() != Tensor::Category::CONST_TENSOR) {
if (!CheckConstantTensor(tensor)) {
continue;
}
std::map<Tensor *, std::string> ctx_tensor_map = ctx->tensors_map();
@ -152,7 +152,7 @@ void SaveDataToNet(const std::map<std::string, Tensor *> &saved_weights, const s
for (auto &item : saved_weights) {
std::string name = item.first;
Tensor *tensor = item.second;
if (tensor->category() == Tensor::Category::CONST_TENSOR && tensor->data_c() != nullptr) {
if ((CheckConstantTensor(tensor)) && tensor->data_c() != nullptr) {
net.write(reinterpret_cast<const char *>(tensor->data_c()), tensor->Size());
}
}

@ -265,15 +265,7 @@ void ArithmeticFP32Coder::ComputeInOutStrides() {
}
}
int ArithmeticFP32Coder::DoCode(CoderContext *const context) {
ComputeInOutStrides();
int element_num = output_tensor_->ElementsNum();
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
int stride = UP_DIV(element_num, thread_num_);
int count = MSMIN(stride, element_num - stride * kDefaultTaskId);
MS_CHECK_TRUE(!arithmetic_run_.empty(), "arithmetic_run function is nullptr!");
NNaclFp32Serializer code;
void ArithmeticFP32Coder::CollectFilesForFnc(CoderContext *const context) {
/**
* for nnacl's operator combine all arithmetic to nnalc/arithmetic.c
* this solution is not suitable for micro, for the size of package.
@ -312,18 +304,34 @@ int ArithmeticFP32Coder::DoCode(CoderContext *const context) {
{
"add_relu_fp32.c",
});
} else if (arithmetic_run_ == "ElementDivRelu6" || arithmetic_run_ == "ElementDivRelu" ||
arithmetic_run_ == "ElementDiv") {
Collect(context,
{
"nnacl/fp32/div_fp32.h",
},
{
"div_fp32.c",
});
} else {
Collect(context,
{
"nnacl/arithmetic_common.h",
"nnacl/fp32/arithmetic_fp32.h",
},
{
"arithmetic_common.c",
"arithmetic_fp32.c",
});
}
}
int ArithmeticFP32Coder::DoCode(CoderContext *const context) {
ComputeInOutStrides();
int element_num = output_tensor_->ElementsNum();
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ is less than zero");
int stride = UP_DIV(element_num, thread_num_);
int count = MSMIN(stride, element_num - stride * kDefaultTaskId);
MS_CHECK_TRUE(!arithmetic_run_.empty(), "arithmetic_run function is nullptr!");
NNaclFp32Serializer code;
if (arithmetic_parameter_->broadcasting_) {
stride = UP_DIV(outside_, thread_num_);
out_count_ = MSMIN(stride, outside_ - stride * kDefaultTaskId);

@ -85,6 +85,8 @@ class ArithmeticFP32Coder final : public OperatorCoder {
int BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output, int dim,
int out_count, int out_thread_stride, NNaclFp32Serializer *const code);
void CollectFilesForFnc(CoderContext *const context);
int break_pos_{0};
int outside_{0};

@ -69,11 +69,10 @@ int ArithmeticSelfFP32Coder::DoCode(CoderContext *const context) {
Collect(context,
{
"nnacl/arithmetic_common.h",
"nnacl/fp32/arithmetic_self.h",
"nnacl/fp32/arithmetic_self_fp32.h",
},
{
"nnacl/fp32/arithmetic_self.c",
"arithmetic_self_fp32.c",
});
NNaclFp32Serializer code;
code.CodeFunction(arithmetic_self_run_, input_tensor_, output_tensor_, size);

@ -37,10 +37,10 @@ int ConcatFP32Coder::ReSize() {
int ConcatFP32Coder::DoCode(CoderContext *const context) {
Collect(context,
{
"nnacl/fp32/concat.h",
"nnacl/base/concat_base.h",
},
{
"nnacl/fp32/concat.c",
"concat_base.c",
});
size_t input_num = input_tensors_.size();
@ -74,7 +74,7 @@ int ConcatFP32Coder::DoCode(CoderContext *const context) {
code << "};\n";
code.CodeFunction("Concat", "inputs_addr", input_num, axis_, "inputs_output_shape", output_tensor_->shape().size(),
output_tensor_, 0, thread_num_);
output_tensor_, 0, thread_num_, sizeof(float));
context->AppendCode(code.str());
return RET_OK;
}

@ -50,7 +50,7 @@ int ExpFP32Coder::DoCode(CoderContext *ctx) {
});
nnacl::NNaclFp32Serializer code;
code.CodeStruct("exp_parameter", *exp_parameter_);
code.CodeFunction("Exp", input_tensor_, "(ExpParameter *)&exp_parameter", kDefaultTaskId);
code.CodeFunction("Exp", input_tensor_, output_tensor_, "(ExpParameter *)&exp_parameter", kDefaultTaskId);
ctx->AppendCode(code.str());
return RET_OK;
}

@ -38,7 +38,7 @@ int PowerFP32Coder::DoCode(CoderContext *const context) {
std::string exp_addr;
bool broadcast = true;
if (input_tensors_.size() == 2) {
exp_addr = allocator_->GetRuntimeAddr(filter_tensor);
exp_addr = allocator_->GetRuntimeAddr(filter_tensor, true);
broadcast = !(input_tensor_->shape() == filter_tensor->shape());
}
std::string cur_exp_str;
@ -50,10 +50,11 @@ int PowerFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context,
{
"nnacl/power.h",
"nnacl/power_parameter.h",
"nnacl/fp32/power_fp32.h",
},
{
"power.c",
"power_fp32.c",
});
NNaclFp32Serializer code;
code.CodeFunction("Power", input_tensor_, cur_exp_str, output_tensor_, len, scale_, shift_, broadcast);

@ -24,6 +24,10 @@
#include "coder/allocator/allocator.h"
namespace mindspore::lite::micro {
// Reports whether the tensor's data is a compile-time constant, i.e. its
// category is CONST_TENSOR or CONST_SCALAR (anything else — e.g. VAR — is not).
bool CheckConstantTensor(const Tensor *const tensor) {
  switch (tensor->category()) {
    case Tensor::Category::CONST_TENSOR:
    case Tensor::Category::CONST_SCALAR:
      return true;
    default:
      return false;
  }
}
template <typename T>
void TensorDataToFile(const lite::Tensor *tensor, std::ofstream &ofs) {
const int NUM = 45;
@ -79,7 +83,7 @@ std::string TensorsToString(const std::vector<Tensor *> &tensors, const std::str
MemoryAllocator *allocator = MemoryAllocator::GetInstance();
std::string info;
for (const auto &tensor : tensors) {
if (tensor->category() == Tensor::Category::CONST_TENSOR) {
if (CheckConstantTensor(tensor)) {
continue;
}
info += " {\n";

@ -30,6 +30,8 @@ namespace mindspore::lite::micro {
constexpr int kWeightPrecision = 9;
bool CheckConstantTensor(const Tensor *const tensor);
std::vector<std::string> AddDumpDataInfo(const std::vector<std::string> &blocks,
const std::vector<std::unique_ptr<OperatorCoder>> &opcoders);

@ -93,11 +93,9 @@ gen_mobile() {
local CODEGEN_FILE="${CODEGEN_FILE_NAME}.tar.gz"
local CODEGEN_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${CODEGEN_FILE}"
# if [ ! -e ${BASEPATH}/build/${CODEGEN_FILE} ]; then
# wget -c -O ${BASEPATH}/build/${CODEGEN_FILE} --no-check-certificate ${CODEGEN_LITE_DOWNLOAD_URL}
# fi
cp ${OUTPUT_DIR}/${CODEGEN_FILE} ${BASEPATH}/build || exit 1
if [ ! -e ${BASEPATH}/build/${CODEGEN_FILE} ]; then
wget -c -O ${BASEPATH}/build/${CODEGEN_FILE} --no-check-certificate ${CODEGEN_LITE_DOWNLOAD_URL}
fi
tar xzvf ${BASEPATH}/build/${CODEGEN_FILE} -C ${BASEPATH}/build/ || exit 1
rm ${BASEPATH}/build/${CODEGEN_FILE} || exit 1

Loading…
Cancel
Save