!12451 [MS_LITE] fix encoder fp16 nan

From: @YeFeng_24
Reviewed-by: @hangangqiang,@zhanghaibo5
Signed-off-by: @hangangqiang
pull/12451/MERGE
Committed by mindspore-ci-bot via Gitee
commit f93ff5430f

@@ -68,12 +68,14 @@ int ConvolutionDelegateFP16CPUKernel::Init() {
       origin_bias_ = CopyData(in_tensors_.at(kBiasIndex));
       need_free_ = need_free_ | BIAS_NEED_FREE;
     }
+    origin_weight_data_type_ = in_tensors_[1]->data_type();
     return RET_OK;
   }
   origin_weight_ = in_tensors_.at(kWeightIndex)->data_c();
   if (in_tensors_.size() == 3) {
     origin_bias_ = in_tensors_.at(kBiasIndex)->data_c();
   }
+  origin_weight_data_type_ = in_tensors_[1]->data_type();
   return ReSize();
 }
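The core of the fix is already visible in this first hunk: ConvolutionDelegateFP16CPUKernel records the weight tensor's data type in origin_weight_data_type_ during Init(), on both the infer-shape-not-done path and the normal path, while origin_weight_ still points at the original buffer. A minimal standalone sketch of that pattern follows; the Tensor type, class name, and Init() signature here are simplified stand-ins rather than the MindSpore classes. The point is that the dtype describing origin_weight_ is captured before the tensor itself may be converted to fp16, so later packing code does not have to trust the tensor's current data_type().

// Minimal sketch, not MindSpore sources: cache the dtype that describes
// origin_weight_ at Init() time, because the tensor's reported data_type()
// may change later (e.g. after an in-place fp32 -> fp16 conversion) while
// origin_weight_ keeps pointing at the old bytes.
#include <cstdint>

enum TypeId { kNumberTypeFloat32, kNumberTypeFloat16 };  // simplified stand-in

struct Tensor {  // hypothetical minimal tensor
  TypeId data_type;
  void *data;
};

class ConvDelegateSketch {
 public:
  int Init(Tensor *weight) {
    origin_weight_ = weight->data;
    origin_weight_data_type_ = weight->data_type;  // snapshot taken here
    return 0;
  }
  // Later, during weight packing, the snapshot decides how the raw bytes
  // behind origin_weight_ are interpreted.
  bool OriginWeightIsFp32() const { return origin_weight_data_type_ == kNumberTypeFloat32; }

 private:
  void *origin_weight_ = nullptr;
  TypeId origin_weight_data_type_ = kNumberTypeFloat32;
};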
@@ -83,7 +85,7 @@ int ConvolutionDelegateFP16CPUKernel::ReSize() {
                                                       context_);
   if (fp16_conv_kernel_ == nullptr) {
     fp16_conv_kernel_ = CpuConvFp16KernelSelect(in_tensors_, out_tensors_, op_parameter_, context_, primitive_,
-                                                origin_weight_, origin_bias_);
+                                                origin_weight_, origin_bias_, origin_weight_data_type_);
     if (fp16_conv_kernel_ == nullptr) {
       MS_LOG(ERROR) << "Selecting execute kernel failed for conv_kernel, got a nullptr.";
       return RET_ERROR;
@@ -107,7 +109,7 @@ ConvParameter *CreateNewConvParameterFp16(ConvParameter *parameter) {
 kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                             const lite::InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive,
-                                            void *origin_weight, void *origin_bias) {
+                                            void *origin_weight, void *origin_bias, TypeId origin_weight_data_type) {
   auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter);
   bool use_winograd = false;
   int out_unit;
@@ -120,8 +122,8 @@ kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &i
     kernel = new (std::nothrow) kernel::ConvolutionWinogradFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive,
                                                                          out_unit, origin_weight, origin_bias);
   } else {
-    kernel = new (std::nothrow)
-        kernel::ConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, origin_weight, origin_bias);
+    kernel = new (std::nothrow) kernel::ConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive,
+                                                                 origin_weight, origin_bias, origin_weight_data_type);
   }
   // Once kernel is selected, init func will invoke InitWeightAndBias
   auto ret = kernel->Init();

@@ -54,12 +54,13 @@ class ConvolutionDelegateFP16CPUKernel : public LiteKernel {
   void *origin_weight_ = nullptr;
   void *origin_bias_ = nullptr;
   kernel::LiteKernel *fp16_conv_kernel_ = nullptr;
+  TypeId origin_weight_data_type_;
 };

 kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &inputs,
                                             const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                             const lite::InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive,
-                                            void *origin_weight, void *origin_bias);
+                                            void *origin_weight, void *origin_bias, TypeId origin_weight_data_type);
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_CONVOLUTION_DELEGATE_FP16_H_

@@ -52,7 +52,7 @@ int ConvolutionFP16CPUKernel::InitWeightBias() {
   }
   memset(packed_weight_, 0, pack_weight_size * sizeof(float16_t));
   RowMajor2Col8MajorFp16(origin_weight_, packed_weight_, out_channel, in_channel * kernel_plane,
-                         filter_tensor->data_type() == kNumberTypeFloat32);
+                         origin_weight_data_type_ == kNumberTypeFloat32);
   // init bias
   bias_data_ = malloc(oc8 * sizeof(float16_t));
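This is the hunk where the cached type actually changes behaviour: RowMajor2Col8MajorFp16 now decides whether to treat origin_weight_ as fp32 based on origin_weight_data_type_ instead of the filter tensor's current data_type(). Below is a rough, self-contained sketch of what that last boolean argument controls; PackWeightFp16 and Fp32ToFp16 are hypothetical stand-ins, and the real routine additionally reorders the weights into 8-channel-major blocks. If the flag disagrees with the buffer's real element type, the loop walks the bytes with the wrong element width and produces corrupted fp16 weights, which is consistent with the NaN this PR title describes.

// Rough sketch (hypothetical helpers, not the MindSpore NNACL code) of what
// the "source is fp32" argument controls during weight packing.
#include <cstddef>
#include <cstdint>
#include <cstring>

using float16_bits = uint16_t;  // stand-in for ARM float16_t so this compiles anywhere

// Crude truncating fp32 -> fp16 conversion for illustration only
// (no rounding, subnormals flushed to zero, NaN collapses to inf).
static float16_bits Fp32ToFp16(float v) {
  uint32_t u;
  std::memcpy(&u, &v, sizeof(u));
  uint32_t sign = (u >> 16) & 0x8000u;
  int32_t exp = static_cast<int32_t>((u >> 23) & 0xFFu) - 127 + 15;
  uint32_t mant = (u >> 13) & 0x3FFu;
  if (exp <= 0) return static_cast<float16_bits>(sign);
  if (exp >= 31) return static_cast<float16_bits>(sign | 0x7C00u);
  return static_cast<float16_bits>(sign | (static_cast<uint32_t>(exp) << 10) | mant);
}

// src_is_fp32 plays the role of the last RowMajor2Col8MajorFp16 argument:
// it must describe the buffer behind src, i.e. the *original* weight dtype.
void PackWeightFp16(const void *src, float16_bits *dst, size_t count, bool src_is_fp32) {
  if (src_is_fp32) {
    const float *s = static_cast<const float *>(src);
    for (size_t i = 0; i < count; ++i) dst[i] = Fp32ToFp16(s[i]);  // convert element-wise
  } else {
    std::memcpy(dst, src, count * sizeof(float16_bits));  // already fp16, copy as-is
  }
  // If the flag is wrong, the source bytes are read with the wrong element
  // width and the packed weights come out as garbage / NaN.
}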

@@ -27,10 +27,12 @@ class ConvolutionFP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
  public:
   ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                            const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
-                           const mindspore::lite::PrimitiveC *primitive, void *origin_weight, void *origin_bias)
+                           const mindspore::lite::PrimitiveC *primitive, void *origin_weight, void *origin_bias,
+                           TypeId origin_weight_data_type)
       : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive),
         origin_weight_(origin_weight),
-        origin_bias_(origin_bias) {}
+        origin_bias_(origin_bias),
+        origin_weight_data_type_(origin_weight_data_type) {}
   ~ConvolutionFP16CPUKernel() override {
     if (packed_weight_ != nullptr) {
       free(packed_weight_);
@@ -62,6 +64,7 @@ class ConvolutionFP16CPUKernel : public ConvolutionBaseFP16CPUKernel {
   float16_t *packed_input_ = nullptr;
   float16_t *packed_weight_ = nullptr;
   float16_t *col_major_input_ = nullptr;
+  TypeId origin_weight_data_type_;
 };
 }  // namespace mindspore::kernel
