!13423 debug mobilenetv2

From: @zhujingxuan
Reviewed-by: @wangchengyuan, @hangangqiang
Signed-off-by: @wangchengyuan
pull/13423/MERGE
mindspore-ci-bot committed via Gitee
commit 9e372b2d7f

@@ -101,6 +101,7 @@ set(CODER_OPCODERS_SRC
${MICRO_DIR}/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/pooling_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/resize_int8_coder.cc

@@ -7,7 +7,9 @@ set(RUNTIME_SRC
)
set(WRAPPER_SRC
${WRAPPER_DIR}/base/common_wrapper.c
${WRAPPER_DIR}/base/detection_post_process_base_wrapper.c
${WRAPPER_DIR}/base/optimize_handler_wrapper.c
${WRAPPER_DIR}/fp32/matmul_fp32_wrapper.c
${WRAPPER_DIR}/int8/matmul_int8_wrapper.c
${WRAPPER_DIR}/int8/add_int8_wrapper.c

@@ -21,6 +21,7 @@
#include "securec/include/securec.h"
#include "coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h"
#include "coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h"
#include "coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h"
#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
#include "src/runtime/kernel/arm/base/convolution_base.h"
#include "src/ops/populate/populate_register.h"
@@ -250,7 +251,39 @@ std::unique_ptr<OperatorCoder> CPUConv2DINT8CoderCreator(const std::vector<Tenso
  return coder;
}
REG_OPERATOR_CODER(kX86, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DINT8CoderCreator)
REG_OPERATOR_CODER(kARM32A, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DINT8CoderCreator)
REG_OPERATOR_CODER(kARM64, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DINT8CoderCreator)
std::unique_ptr<OperatorCoder> CPUConv2DFusionINT8CoderCreator(const std::vector<Tensor *> &in_tensors,
                                                               const std::vector<Tensor *> &out_tensors,
                                                               const Model::Node *node, size_t node_index,
                                                               Target target) {
  const void *primitive = node->primitive_;
  if (primitive == nullptr) {
    return nullptr;
  }
  int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
  ParameterGen paramGen =
    PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version);
  if (paramGen == nullptr) {
    MS_LOG(ERROR) << "parameter generator is null";
    return nullptr;
  }
  auto conv_param = reinterpret_cast<ConvParameter *>(paramGen(node->primitive_));
  std::unique_ptr<OperatorCoder> coder;
  if (conv_param->group_ == 1) {
    coder = CPUConv2DINT8CoderCreator(in_tensors, out_tensors, node, node_index, target);
  } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) {
    coder = CPUOpCoderCreator<ConvolutionDepthwiseINT8Coder>(in_tensors, out_tensors, node, node_index, target);
  } else {
    // group conv
  }
  free(conv_param);
  if (coder == nullptr) {
    MS_LOG(ERROR) << "create conv2d int8 coder failed";
    return nullptr;
  }
  return coder;
}
REG_OPERATOR_CODER(kX86, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DFusionINT8CoderCreator)
REG_OPERATOR_CODER(kARM32A, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DFusionINT8CoderCreator)
REG_OPERATOR_CODER(kARM64, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUConv2DFusionINT8CoderCreator)
} // namespace mindspore::lite::micro::nnacl

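Note on the hunk above: the int8 Conv2DFusion registration now points at CPUConv2DFusionINT8CoderCreator, which populates a ConvParameter only to inspect group_ and then forwards to the matching coder. Roughly, the routing works out as follows for typical MobileNetV2-style layers (the channel counts are illustrative, not values taken from the patch):

// group_ == 1                                  -> CPUConv2DINT8CoderCreator      (e.g. a 1x1 pointwise conv)
// group_ == input_channel_ == output_channel_  -> ConvolutionDepthwiseINT8Coder  (e.g. depthwise, 32 in / 32 out, group 32)
// any other grouped conv                       -> not handled yet: coder stays nullptr and
//                                                 "create conv2d int8 coder failed" is logged
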
@@ -22,8 +22,6 @@
#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
#include "nnacl/int8/conv_depthwise_int8.h"
using mindspore::schema::PrimitiveType_DepthwiseConv2D;
namespace mindspore::lite::micro {
int ConvolutionDepthwiseINT8Coder::Prepare(CoderContext *const context) {
@@ -105,6 +103,4 @@ int ConvolutionDepthwiseINT8Coder::DoCode(CoderContext *const context) {
return RET_OK;
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_DepthwiseConv2D,
CPUOpCoderCreator<ConvolutionDepthwiseINT8Coder>)
} // namespace mindspore::lite::micro

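The deletions in this file follow from the new creator above: the standalone REG_OPERATOR_CODER entry for PrimitiveType_DepthwiseConv2D (together with its now-unused using-declaration) is removed, because depthwise int8 convolutions are now reached through the Conv2DFusion registration rather than a separate primitive type.
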
@@ -17,6 +17,7 @@
#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "src/common/log_adapter.h"
#include "coder/log.h"
#include "coder/opcoders/parallel.h"
#include "nnacl/pooling_parameter.h"
namespace mindspore::lite::micro::nnacl {
@@ -34,7 +35,7 @@ void NNaclFp32Serializer::CodeStruct(const std::string &name, const PoolingParam
pooling_parameter.output_batch_, pooling_parameter.output_channel_, pooling_parameter.pad_u_,
pooling_parameter.pad_d_, pooling_parameter.pad_l_, pooling_parameter.pad_r_,
// other parameter
pooling_parameter.thread_num_, nullptr, pooling_parameter.quantize_);
gThreadNum, nullptr, pooling_parameter.quantize_);
}
void NNaclFp32Serializer::CodeStruct(const std::string &name, const BatchNormParameter &batch_norm_parameter) {
@@ -60,14 +61,16 @@ void NNaclFp32Serializer::CodeStruct(const std::string &name, const SoftmaxParam
}
void NNaclFp32Serializer::CodeStruct(const std::string &name, const ConvParameter &conv_parameter) {
code << "int thread_num = MSMIN(" << gThreadNum << ", " << conv_parameter.output_h_ << ");\n";
CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, "{}", conv_parameter.kernel_h_,
conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_,
conv_parameter.dilation_h_, conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_,
conv_parameter.pad_l_, conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_,
conv_parameter.input_batch_, conv_parameter.input_h_, conv_parameter.input_w_,
conv_parameter.input_channel_, conv_parameter.output_batch_, conv_parameter.output_h_,
conv_parameter.output_w_, conv_parameter.output_channel_, conv_parameter.op_parameter_.thread_num_,
conv_parameter.input_unit_, conv_parameter.output_unit_, conv_parameter.act_type_);
conv_parameter.output_w_, conv_parameter.output_channel_, "thread_num", conv_parameter.input_unit_,
conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_,
conv_parameter.channel_multiplie_, conv_parameter.output_padding_w_, conv_parameter.output_padding_h_);
}
void NNaclFp32Serializer::CodeStruct(const std::string &name, const MatMulParameter &mat_mul_parameter) {

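Context for the two serializer hunks: CodeStruct writes a C initializer for the parameter struct into the generated micro source. The PoolingParameter initializer now takes its thread count from gThreadNum instead of the value captured in pooling_parameter.thread_num_, and the ConvParameter initializer first emits a thread_num local clamped by the output height (presumably so a kernel never requests more threads than it has output rows to split) before referencing it by name. The trailing fields pad_mode_, channel_multiplie_, output_padding_w_ and output_padding_h_ are added so the initializer matches nnacl's current ConvParameter layout. Roughly, the generated source gains lines like the following (the concrete numbers and the variable name are illustrative):

int thread_num = MSMIN(4, 112);  /* i.e. MSMIN(gThreadNum, conv_parameter.output_h_) at generation time */
ConvParameter conv_param = { /* ... fields through output_channel_ ... */ thread_num,
                             /* input_unit_, output_unit_, pad_mode_, act_type_,
                                channel_multiplie_, output_padding_w_, output_padding_h_ */ };

The NNaclInt8Serializer hunk below appends the same three trailing fields for the same reason.
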
@@ -57,7 +57,8 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConvParamete
conv_parameter.input_batch_, conv_parameter.input_h_, conv_parameter.input_w_,
conv_parameter.input_channel_, conv_parameter.output_batch_, conv_parameter.output_h_,
conv_parameter.output_w_, conv_parameter.output_channel_, "thread_num", conv_parameter.input_unit_,
conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_);
conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_,
conv_parameter.channel_multiplie_, conv_parameter.output_padding_w_, conv_parameter.output_padding_h_);
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParameter &matmul_parameter) {
