!5238 [MS][LITE] optimize arm cpu fp32 op: conv depthwise

Merge pull request !5238 from yangruoqi713/lite
pull/5238/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit c6166a221c

File diff suppressed because it is too large. Load diff.

@ -48,11 +48,6 @@ void DepthwiseBorder(float *dst, const float *src, const float *weight, const fl
void ConvDwC4Fp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data,
const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id);
void ConvDw3x3Fp32FilterTrans(float *trans_weight, float *weight, int oc4);
void ConvDw3x3Fp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data,
float *trans_buffer, float *block_buffer, const ConvParameter *conv_param, int task_id);
void DeconvDwC4Fp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data,
const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id);

@ -62,6 +62,10 @@ void PackWeightFp32(float *weight_data, ConvParameter *conv_param, float *packed
} // kernel plane loop
}
// Repack depthwise weights from K,H,W to H,W,K layout.
// Reuses the generic NCHW->NHWC pack routine with batch = 1, treating the
// kernel plane (H*W) as the spatial dimension and K as the channel dimension.
void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel) {
  PackNCHWToNHWCFp32(src, dst, 1, plane, channel);
}
void PackWeightInt8(int8_t *weight_data, ConvParameter *conv_param, int8_t *packed_weight, int32_t *weight_sum) {
// original weight format : ohwi
int kernel_h = conv_param->kernel_h_;

@ -51,6 +51,8 @@ void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvPara
void PackWeightFp32(float *weight_data, ConvParameter *conv_param, float *packed_weight, int oc_block,
int oc_block_num);
void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel);
void PackWeightInt8(int8_t *weight_data, ConvParameter *conv_param, int8_t *packed_weight, int32_t *weight_sum);
void PackWeightInt8Opt(int8_t *weight_data, ConvParameter *conv_param, int8_t *packed_weight, int32_t *weight_sum);

@ -283,6 +283,7 @@ int DepthwiseConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
int input_channel = in_shape.at(3);
int output_w = 0, output_h = 0;
input_channel_ = input_channel;
pad_l_ = GetPadLeft();
pad_u_ = GetPadUp();
pad_d_ = GetPadDown();

@ -84,12 +84,14 @@ class DepthwiseConv2D : public PrimitiveC {
int PadDown() const { return this->pad_d_; }
int PadLeft() const { return this->pad_l_; }
int PadRight() const { return this->pad_r_; }
int GetInputChannel() const { return this->input_channel_; }
protected:
int pad_u_ = 0;
int pad_d_ = 0;
int pad_l_ = 0;
int pad_r_ = 0;
int input_channel_ = 0;
};
} // namespace lite
} // namespace mindspore

@ -435,6 +435,7 @@ OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitiv
conv_param->pad_d_ = convdw_lite_primitive->PadDown();
conv_param->pad_l_ = convdw_lite_primitive->PadLeft();
conv_param->pad_r_ = convdw_lite_primitive->PadRight();
conv_param->input_channel_ = convdw_lite_primitive->GetInputChannel();
conv_param->dilation_h_ = conv_primitive->GetDilateH();
conv_param->dilation_w_ = conv_primitive->GetDilateW();
auto act_type = conv_primitive->GetActivationType();

@ -15,6 +15,7 @@
*/
#include "src/runtime/kernel/arm/fp32/convolution_depthwise.h"
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
@ -36,7 +37,7 @@ ConvolutionDepthwiseCPUKernel::~ConvolutionDepthwiseCPUKernel() {
}
int ConvolutionDepthwiseCPUKernel::InitWeightBias() {
// init weight: o, h, w, i; o == group, i == 1
// init weight: k, h, w, c; k == group == output_channel, c == 1
auto weight_tensor = in_tensors_[kWeightIndex];
auto origin_weight = reinterpret_cast<float *>(weight_tensor->Data());
int channel = weight_tensor->Batch();
@ -47,7 +48,7 @@ int ConvolutionDepthwiseCPUKernel::InitWeightBias() {
MS_LOG(ERROR) << "Malloc buffer failed.";
return RET_ERROR;
}
PackNCHWToNHWCFp32(origin_weight, packed_weight_, 1, weight_tensor->Height() * weight_tensor->Width(), channel);
PackWeightKHWToHWKFp32(origin_weight, packed_weight_, weight_tensor->Height() * weight_tensor->Width(), channel);
auto bias_tensor = in_tensors_[kBiasIndex];
bias_data_ = reinterpret_cast<float *>(malloc(channel * sizeof(float)));
@ -129,9 +130,13 @@ kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::tensor::T
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
kernel::LiteKernel *kernel;
kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (conv_param->input_channel_ < 32) {
kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWCPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else {
kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive);
}
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
return nullptr;

@ -1,218 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_DepthwiseConv2D;
namespace mindspore::kernel {
// Destructor: release the shape-dependent temporaries first, then the
// long-lived per-kernel buffers (scratch block and transformed weights).
ConvolutionDepthwise3x3CPUKernel::~ConvolutionDepthwise3x3CPUKernel() {
  FreeTmpBufer();
  auto release = [](float *&buf) {
    if (buf != nullptr) {
      free(buf);
      buf = nullptr;
    }
  };
  release(block_buffer_);
  release(packed_weight_);
}
// Free the buffers that depend on tensor shape (re-created by ReSize()).
// When need_align_ is false, packed_input_/packed_output_ alias the tensor
// memory set in Run() and must NOT be freed here — only free them when they
// were actually malloc'd (need_align_ == true).
void ConvolutionDepthwise3x3CPUKernel::FreeTmpBufer() {
  auto release = [](float *&buf) {
    if (buf != nullptr) {
      free(buf);
      buf = nullptr;
    }
  };
  if (need_align_) {
    release(packed_input_);
    release(packed_output_);
  }
  release(trans_buffer_);
}
// Pack and transform the depthwise 3x3 filter and set up the bias buffer.
// init weight: o, h, w, i; o == group, i == 1
// Returns RET_OK on success, RET_ERROR on allocation failure.
int ConvolutionDepthwise3x3CPUKernel::InitWeightBias() {
  auto weight_tensor = in_tensors_[kWeightIndex];
  auto origin_weight = reinterpret_cast<float *>(weight_tensor->Data());
  // o h w 1 -> o/4 h w 1 4
  int OC4 = UP_DIV(weight_tensor->Batch(), C4NUM);
  int weight_c4_size = OC4 * C4NUM * 9;  // 9 = 3x3 taps per channel
  auto tmp_weight = reinterpret_cast<float *>(malloc(weight_c4_size * sizeof(float)));
  if (tmp_weight == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
  }
  memset(tmp_weight, 0, weight_c4_size * sizeof(float));
  PackNCHWToNC4HW4Fp32(origin_weight, tmp_weight, 1, weight_tensor->Height() * weight_tensor->Width(),
                       weight_tensor->Batch());
  // weight transform: each 3x3 tap set becomes a 4x4 transformed tile
  int packed_weight_size = OC4 * C4NUM * 16;
  packed_weight_ = reinterpret_cast<float *>(malloc(packed_weight_size * sizeof(float)));
  if (packed_weight_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    free(tmp_weight);  // fix: do not leak the staging buffer on this error path
    return RET_ERROR;
  }
  memset(packed_weight_, 0, packed_weight_size * sizeof(float));
  ConvDw3x3Fp32FilterTrans(packed_weight_, tmp_weight, OC4);
  free(tmp_weight);  // fix: staging buffer was previously never freed (memory leak)
  // init bias: zero-padded to a multiple of C4NUM; copy only ElementsNum values
  bias_data_ = reinterpret_cast<float *>(malloc(C4NUM * OC4 * sizeof(float)));
  if (bias_data_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
  }
  memset(bias_data_, 0, C4NUM * OC4 * sizeof(float));
  if (in_tensors_.size() == kInputSize2) {
    auto ori_bias = reinterpret_cast<float *>(in_tensors_.at(kBiasIndex)->Data());
    memcpy(bias_data_, ori_bias, in_tensors_.at(kBiasIndex)->ElementsNum() * sizeof(float));
  }
  // cap worker count at the number of C4 output-channel groups
  conv_param_->thread_num_ = MSMIN(thread_count_, OC4);
  return RET_OK;
}
// Allocate shape-dependent scratch buffers; invoked from ReSize().
// If the channel count is not a multiple of C4NUM, the input/output must be
// re-packed into C4-aligned NHWC4 layout, so dedicated pack buffers are
// allocated and need_align_ is latched to true (freed via FreeTmpBufer()).
int ConvolutionDepthwise3x3CPUKernel::InitBuffer() {
if (conv_param_->input_channel_ % C4NUM != 0) {
need_align_ = true;
int IC4 = UP_DIV(conv_param_->input_channel_, C4NUM);
// NHWC4 packed input: batch * H * W * (IC rounded up to a multiple of 4)
int pack_input_size = conv_param_->input_batch_ * conv_param_->input_h_ * conv_param_->input_w_ * C4NUM * IC4;
packed_input_ = reinterpret_cast<float *>(malloc(pack_input_size * sizeof(float)));
if (packed_input_ == nullptr) {
MS_LOG(ERROR) << "Malloc buffer failed.";
return RET_ERROR;
}
// zero-fill so the padded channel lanes contribute nothing
memset(packed_input_, 0, pack_input_size * sizeof(float));
int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM);
// NHWC4 packed output; fully overwritten by the kernel, so no memset needed
int pack_output_size = conv_param_->output_batch_ * conv_param_->output_h_ * conv_param_->output_w_ * C4NUM * OC4;
packed_output_ = reinterpret_cast<float *>(malloc(pack_output_size * sizeof(float)));
if (packed_output_ == nullptr) {
MS_LOG(ERROR) << "Malloc buffer failed.";
return RET_ERROR;
}
}
// malloc transform buffer
// one 2x2-output tile needs a 16-element (4x4) C4 slot per tile; each worker
// thread gets its own trans_size_ slice (see Execute()).
trans_size_ = UP_DIV(conv_param_->output_w_, 2) * UP_DIV(conv_param_->output_h_, 2) * 16 * C4NUM;
size_t trans_buffer_size = thread_count_ * trans_size_ * sizeof(float);
trans_buffer_ = reinterpret_cast<float *>(malloc(trans_buffer_size));
if (trans_buffer_ == nullptr) {
MS_LOG(ERROR) << "malloc trans buffer failed.";
return RET_ERROR;
}
return RET_OK;
}
int ConvolutionDepthwise3x3CPUKernel::Init() {
// malloc one block buffer
block_buffer_ = reinterpret_cast<float *>(malloc(thread_count_ * 16 * C4NUM * sizeof(float)));
if (block_buffer_ == nullptr) {
MS_LOG(ERROR) << "malloc block buffer failed.";
return RET_ERROR;
}
auto ret = InitWeightBias();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Depthwise3x3 fp32 initWeightBias error!ret: " << ret;
return ret;
}
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int ConvolutionDepthwise3x3CPUKernel::ReSize() {
FreeTmpBufer();
ConvolutionBaseCPUKernel::Init();
auto ret = InitBuffer();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Depthwise3x3 fp32 initBuffer error!ret: " << ret;
return ret;
}
return RET_OK;
}
int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {
auto trans_buf = trans_buffer_ + task_id * trans_size_;
auto block_buf = block_buffer_ + task_id * 16 * C4NUM;
ConvDw3x3Fp32(packed_output_, packed_input_, packed_weight_, reinterpret_cast<float *>(bias_data_), trans_buf,
block_buf, conv_param_, task_id);
return RET_OK;
}
int ConvDw3x3Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
auto conv_dw_3x3 = reinterpret_cast<ConvolutionDepthwise3x3CPUKernel *>(cdata);
auto ret = conv_dw_3x3->Execute(task_id);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
// Execute the depthwise 3x3 convolution: (optionally) pack input to NHWC4,
// run the parallel kernel, then (optionally) unpack the output back to NHWC.
// NOTE: when need_align_ is false, packed_input_/packed_output_ alias the
// tensor memory directly — FreeTmpBufer() deliberately skips them in that case.
int ConvolutionDepthwise3x3CPUKernel::Run() {
auto ret = Prepare();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Prepare failed.";
return ret;
}
// depthwise: every input channel maps to exactly one output channel
if (conv_param_->input_channel_ != conv_param_->output_channel_) {
MS_LOG(ERROR) << "Only support input channel equals output channel.";
return RET_ERROR;
}
auto input_tensor = in_tensors_.at(kInputIndex);
auto input_addr = reinterpret_cast<float *>(input_tensor->Data());
// pack input: to nhwc4
if (need_align_) {
PackNHWCToNHWC4Fp32(input_addr, packed_input_, conv_param_->input_batch_,
conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_);
} else {
// channels already C4-aligned: compute in place on the tensor buffer
packed_input_ = input_addr;
}
auto output_addr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
if (!need_align_) {
packed_output_ = output_addr;
}
ret = LiteBackendParallelLaunch(ConvDw3x3Run, this, conv_param_->thread_num_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ConvDw3x3Run error: error_code[" << ret << "]";
return RET_ERROR;
}
// unpack NHWC4 result back into the caller-visible NHWC output tensor
if (need_align_) {
PackNHWC4ToNHWCFp32(packed_output_, output_addr, conv_param_->output_batch_,
conv_param_->output_h_ * conv_param_->output_w_, conv_param_->output_channel_);
}
return RET_OK;
}
} // namespace mindspore::kernel

@ -1,55 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
#define MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/convolution_base.h"
#include "nnacl/fp32/conv_depthwise.h"
namespace mindspore::kernel {
// CPU kernel for fp32 depthwise 3x3 convolution using a tile-transform
// (Winograd-style) implementation; buffers are C4 (channel-by-4) packed.
class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel {
 public:
ConvolutionDepthwise3x3CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~ConvolutionDepthwise3x3CPUKernel() override;
// One-time setup: scratch block + weight/bias packing; calls ReSize() if shapes are known.
int Init() override;
// Rebuilds shape-dependent buffers after tensor shapes change.
int ReSize() override;
// Packs input (if unaligned), launches the parallel kernel, unpacks output.
int Run() override;
int InitWeightBias();
int InitBuffer();
// Per-thread worker invoked via the parallel launcher.
int Execute(int task_id);
 private:
// Frees shape-dependent buffers; skips packed_input_/packed_output_ when
// they alias tensor memory (need_align_ == false).
void FreeTmpBufer();
float *packed_weight_ = nullptr;   // transformed 3x3 filter, C4-packed
float *packed_input_ = nullptr;    // NHWC4 input (alias or owned, see need_align_)
float *packed_output_ = nullptr;   // NHWC4 output (alias or owned, see need_align_)
float *block_buffer_ = nullptr;    // per-thread 4x4 tile scratch
float *trans_buffer_ = nullptr;    // per-thread tile-transform workspace
int trans_size_;                   // per-thread slice size of trans_buffer_
bool need_align_ = false;          // true when channels are not a multiple of C4NUM
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
Loading…
Cancel
Save