[MSLITE][Develop] optimize conv dw arm cpu int8 op: add 3x3

pull/7526/head
yangruoqi713 4 years ago
parent 1af8c8badc
commit 161ecc4ed0

File diff suppressed because it is too large

@@ -24,9 +24,17 @@
extern "C" {
#endif
+bool CheckIfUse3X3(const ConvParameter *conv_param, int channel);
void ConvDwInt8(int8_t *output_data, int32_t *output_row, const int8_t *input_data, const int16_t *weight_data,
                const int32_t *bias_data, const ConvParameter *conv_param, int task_id);
+void ConvDw3x3PadInt8(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data,
+                      const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding);
+void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data,
+                   const int32_t *bias_data, const ConvParameter *conv_param, int task_id);
void ConvDwSWInt8(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, const int32_t *bias_data,
                  int8_t *input_zp, int32_t *output_zp, const ConvParameter *conv_param,
                  const SlidingWindowParam *sliding, int task_id);

@@ -709,6 +709,25 @@ void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int c
  }
}

+void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel) {
+  int c8 = UP_DIV(channel, C8NUM);
+  for (int b = 0; b < batch; b++) {
+    int src_offset = b * plane * channel;
+    int dst_offset = b * plane * c8 * C8NUM;
+    for (int c = 0; c < channel; c++) {
+      int c8_block_num = c / C8NUM;
+      int c8_block_rem = c % C8NUM;
+      int src_c_offset = src_offset + c * plane;
+      int dst_c_offset = dst_offset + c8_block_num * plane * C8NUM;
+      for (int k = 0; k < plane; k++) {
+        int src_kernel_offset = src_c_offset + k;
+        int dst_kernel_offset = dst_c_offset + C8NUM * k + c8_block_rem;
+        ((int8_t *)dst + dst_kernel_offset)[0] = ((int8_t *)src + src_kernel_offset)[0];
+      }
+    }
+  }
+}

void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
  int c4 = UP_DIV(channel, C4NUM);
  for (int b = 0; b < batch; b++) {

@@ -83,6 +83,8 @@ void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int c
void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
+void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
+void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel);
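
The new NC8HW8 packer declared here (full body in the hunk above) is easy to exercise standalone: channels are grouped into blocks of eight, and within a block the eight channels become the innermost, contiguous dimension at each plane position. A minimal sketch with a driver — the driver and expected output are ours; the packing body is copied from this commit:

#include <cstdint>
#include <cstdio>

#define C8NUM 8
#define UP_DIV(x, y) (((x) + (y) - 1) / (y))

// Standalone copy of PackNCHWToNC8HW8Int8: channel blocks of 8 stored
// plane-major, with the 8 channels of a block interleaved innermost.
void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel) {
  int c8 = UP_DIV(channel, C8NUM);
  for (int b = 0; b < batch; b++) {
    int src_offset = b * plane * channel;
    int dst_offset = b * plane * c8 * C8NUM;
    for (int c = 0; c < channel; c++) {
      int c8_block_num = c / C8NUM;
      int c8_block_rem = c % C8NUM;
      int src_c_offset = src_offset + c * plane;
      int dst_c_offset = dst_offset + c8_block_num * plane * C8NUM;
      for (int k = 0; k < plane; k++) {
        ((int8_t *)dst)[dst_c_offset + C8NUM * k + c8_block_rem] = ((const int8_t *)src)[src_c_offset + k];
      }
    }
  }
}

int main() {
  // 1 batch, plane of 2 positions, 8 channels: NCHW element (c, k) = 10*c + k.
  int8_t src[16], dst[16];
  for (int c = 0; c < 8; c++)
    for (int k = 0; k < 2; k++) src[c * 2 + k] = (int8_t)(10 * c + k);
  PackNCHWToNC8HW8Int8(src, dst, 1, 2, 8);
  // Expect all 8 channels of plane position 0, then of position 1:
  // 0 10 20 30 40 50 60 70 1 11 21 31 41 51 61 71
  for (int i = 0; i < 16; i++) printf("%d ", dst[i]);
  printf("\n");
  return 0;
}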

@@ -109,16 +109,11 @@ static int ConvDwFp16Run(void *cdata, int task_id) {
}

int ConvolutionDepthwiseFp16CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";
    return RET_ERROR;
  }
  ret = ConvolutionBaseFP16CPUKernel::GetExecuteTensor();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Get Execute tensor failed.";

@@ -140,11 +140,6 @@ static int ConvDwSWFp16Run(void *cdata, int task_id) {
}

int ConvolutionDepthwiseSWFp16CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";

@@ -100,10 +100,6 @@ int ConvDwRun(void *cdata, int task_id) {
}

int ConvolutionDepthwiseCPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";

@@ -134,17 +134,11 @@ int ConvDwSWRun(void *cdata, int task_id) {
}

int ConvolutionDepthwiseSWCPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";
    return ret;
  }
  ret = InitBuffer();
  if (ret != 0) {
    MS_LOG(ERROR) << "Convolution depthwise fp32 InitBuffer failed.";

@@ -0,0 +1,183 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/int8/conv_depthwise_int8.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_DepthwiseConv2D;
namespace mindspore::kernel {
ConvolutionDepthwise3x3Int8CPUKernel::~ConvolutionDepthwise3x3Int8CPUKernel() {
  if (sliding_ != nullptr) {
    delete sliding_;
    sliding_ = nullptr;
  }
  if (packed_weight_ != nullptr) {
    free(packed_weight_);
    packed_weight_ = nullptr;
  }
  FreeQuantParam();
}

int ConvolutionDepthwise3x3Int8CPUKernel::InitWeightBias() {
  // init weight: int8 -> int16, with the filter zero point subtracted up front
  auto weight_tensor = in_tensors_[kWeightIndex];
  auto origin_weight = reinterpret_cast<int8_t *>(weight_tensor->MutableData());
  int channel = weight_tensor->Batch();
  if (channel % 8 != 0) {
    MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8CPUKernel doesn't support channel " << channel;
    return RET_ERROR;
  }
  int pack_weight_size = channel * weight_tensor->Height() * weight_tensor->Width();
  auto tmp_weight = reinterpret_cast<int8_t *>(malloc(pack_weight_size * sizeof(int8_t)));
  if (tmp_weight == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
  }
  PackNCHWToNHWCInt8(origin_weight, tmp_weight, 1, weight_tensor->Height() * weight_tensor->Width(),
                     weight_tensor->Batch());
  packed_weight_ = reinterpret_cast<int16_t *>(malloc(pack_weight_size * sizeof(int16_t)));
  if (packed_weight_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    free(tmp_weight);  // don't leak the temporary on the error path
    return RET_ERROR;
  }
  bool filter_per_channel = conv_param_->conv_quant_arg_.per_channel_ & FILTER_PER_CHANNEL;
  if (filter_per_channel) {
    for (int i = 0; i < weight_tensor->Height() * weight_tensor->Width(); i++) {
      for (int c = 0; c < channel; c++) {
        int weight_zp = conv_param_->conv_quant_arg_.filter_quant_args_[c].zp_;
        packed_weight_[i * channel + c] = (int16_t)(tmp_weight[i * channel + c] - weight_zp);
      }
    }
  } else {
    int weight_zp = conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_;
    for (int i = 0; i < weight_tensor->ElementsNum(); i++) {
      packed_weight_[i] = (int16_t)(tmp_weight[i] - weight_zp);
    }
  }
  free(tmp_weight);
  bias_data_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t)));
  if (bias_data_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
  }
  memset(bias_data_, 0, channel * sizeof(int32_t));
  if (in_tensors_.size() == kInputSize2) {
    auto bias_tensor = in_tensors_.at(kBiasIndex);
    auto ori_bias = reinterpret_cast<int32_t *>(bias_tensor->MutableData());
    memcpy(bias_data_, ori_bias, bias_tensor->ElementsNum() * sizeof(int32_t));
  }
  return RET_OK;
}
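
The widening above subtracts the filter zero point while converting each int8 weight to int16, so the hot loop can multiply-accumulate without per-element zero-point fixups. A standalone sketch of the same transform — WidenWeights is a hypothetical name; the per-channel/per-layer split mirrors InitWeightBias:

#include <cstdint>
#include <vector>

// Widen int8 weights to int16 with the filter zero point pre-subtracted,
// per-channel (zp[c]) or per-layer (zp[0]), as InitWeightBias() does.
std::vector<int16_t> WidenWeights(const std::vector<int8_t> &w, int plane, int channel,
                                  const std::vector<int32_t> &zp, bool per_channel) {
  std::vector<int16_t> out(w.size());
  for (int i = 0; i < plane; i++) {
    for (int c = 0; c < channel; c++) {
      int32_t z = per_channel ? zp[c] : zp[0];
      out[i * channel + c] = static_cast<int16_t>(w[i * channel + c] - z);
    }
  }
  return out;
}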

int ConvolutionDepthwise3x3Int8CPUKernel::Init() {
  sliding_ = new (std::nothrow) SlidingWindowParam;
  if (sliding_ == nullptr) {
    MS_LOG(ERROR) << "new SlidingWindowParam failed.";
    return RET_ERROR;
  }
  if (!InferShapeDone()) {
    return RET_OK;
  }
  return ReSize();
}

int ConvolutionDepthwise3x3Int8CPUKernel::ReSize() {
  ConvolutionBaseCPUKernel::Init();
  InitSlidingParamConvDw(sliding_, conv_param_, conv_param_->input_channel_);
  auto ret = ConvolutionBaseCPUKernel::SetQuantParam();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set quant param failed.";
    return ret;
  }
  conv_param_->thread_num_ = MSMIN(thread_count_, conv_param_->output_h_);
  ret = InitWeightBias();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Depthwise int8 InitWeightBias error!";
    return ret;
  }
  return RET_OK;
}

int ConvolutionDepthwise3x3Int8CPUKernel::Execute(int task_id) {
  // Each task owns a private 64 * 10 * 10 slice of the scratch pool allocated
  // in InitBuffer().
  auto buffer = buffer_ + 64 * 10 * 10 * task_id;
  ConvDw3x3Int8(output_ptr_, buffer, input_ptr_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), conv_param_,
                task_id);
  return RET_OK;
}

int ConvDw3x3Int8Run(void *cdata, int task_id) {
  auto conv_dw_int8 = reinterpret_cast<ConvolutionDepthwise3x3Int8CPUKernel *>(cdata);
  auto ret = conv_dw_int8->Execute(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8Run error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int ConvolutionDepthwise3x3Int8CPUKernel::InitBuffer() {
  // One scratch tile per worker thread.
  int buffer_size = 64 * 10 * 10 * conv_param_->thread_num_;
  buffer_ = reinterpret_cast<int8_t *>(context_->allocator->Malloc(buffer_size * sizeof(int8_t)));
  if (buffer_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
  }
  return RET_OK;
}
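
The scratch pool is a flat per-thread partition: InitBuffer() allocates thread_num contiguous tiles and Execute() offsets by task_id, so tasks never share bytes. The 64 * 10 * 10 tile size presumably covers a 10x10 int8 input patch across 64 channels (enough for an 8x8 output block of a 3x3 stride-1 window), though the kernel treats it as opaque scratch. A sketch of the slicing, with names of our own:

#include <cstdint>
#include <vector>

constexpr int kTileBytes = 64 * 10 * 10;  // matches the kernel's constant

// Return task_id's private slice of the shared scratch pool.
int8_t *TaskBuffer(std::vector<int8_t> &pool, int task_id) {
  return pool.data() + static_cast<size_t>(kTileBytes) * task_id;
}

int main() {
  int thread_num = 4;  // pool sized for thread_num tasks, as InitBuffer() does
  std::vector<int8_t> pool(static_cast<size_t>(kTileBytes) * thread_num);
  int8_t *buf = TaskBuffer(pool, 2);  // slice for task 2; disjoint from tasks 1 and 3
  (void)buf;
  return 0;
}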

int ConvolutionDepthwise3x3Int8CPUKernel::Run() {
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";
    return RET_ERROR;
  }
  ret = InitBuffer();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Depthwise int8 InitBuffer error!";
    return ret;
  }
  auto input_tensor = in_tensors_.at(kInputIndex);
  input_ptr_ = reinterpret_cast<int8_t *>(input_tensor->MutableData());
  auto output_tensor = out_tensors_.at(kOutputIndex);
  output_ptr_ = reinterpret_cast<int8_t *>(output_tensor->MutableData());
  // With pad == 1, the padded border is computed first in a single pass;
  // ParallelLaunch then covers the tiled body.
  if (conv_param_->pad_l_ == 1 && conv_param_->pad_u_ == 1) {
    ConvDw3x3PadInt8(output_ptr_, input_ptr_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), conv_param_,
                     sliding_);
  }
  ret = ParallelLaunch(this->context_->thread_pool_, ConvDw3x3Int8Run, this, conv_param_->thread_num_);
  if (ret != RET_OK) {
    context_->allocator->Free(buffer_);
    MS_LOG(ERROR) << "ConvDw3x3Int8Run error: error_code[" << ret << "]";
    return RET_ERROR;
  }
  context_->allocator->Free(buffer_);
  return RET_OK;
}
}  // namespace mindspore::kernel
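
To see why the weights are widened to int16 and the bias kept in int32, here is a worked single-pixel 3x3 accumulation in the same quantization scheme. The requantization step is the standard int8 recipe sketched with a float multiplier for clarity; the real kernel uses fixed-point multipliers from conv_quant_arg_, and all the data below is made up:

#include <cstdint>
#include <cstdio>

int main() {
  // One 3x3 window of one channel, plus its quant parameters (illustrative).
  int8_t input[9] = {5, 3, -2, 7, 0, 1, -4, 2, 6};
  int8_t input_zp = 1;
  int8_t raw_weight[9] = {1, -1, 2, 0, 3, -2, 1, 1, 0};
  int8_t weight_zp = 0;

  // Widen weights exactly as InitWeightBias() does: zp already subtracted.
  int16_t weight[9];
  for (int i = 0; i < 9; i++) weight[i] = (int16_t)(raw_weight[i] - weight_zp);

  // int32 accumulator seeded with the int32 bias; only the input zp remains
  // to be handled in the inner loop.
  int32_t acc = 100;
  for (int i = 0; i < 9; i++) acc += (input[i] - input_zp) * weight[i];

  // Standard requantization sketch: scale, add output zp, clamp to int8.
  float out_multiplier = 0.05f;
  int8_t out_zp = -3;
  int32_t out = (int32_t)(acc * out_multiplier) + out_zp;
  if (out > 127) out = 127;
  if (out < -128) out = -128;
  printf("acc=%d out=%d\n", acc, (int)(int8_t)out);  // acc=89 out=1
  return 0;
}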

@@ -0,0 +1,51 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/convolution_base.h"
#include "nnacl/fp32/conv_depthwise.h"
namespace mindspore::kernel {
class ConvolutionDepthwise3x3Int8CPUKernel : public ConvolutionBaseCPUKernel {
 public:
  ConvolutionDepthwise3x3Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                                       const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
                                       const mindspore::lite::PrimitiveC *primitive)
      : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~ConvolutionDepthwise3x3Int8CPUKernel() override;

  int Init() override;
  int ReSize() override;
  int Run() override;
  int InitWeightBias();
  int Execute(int task_id);

 private:
  int InitBuffer();
  SlidingWindowParam *sliding_ = nullptr;
  int16_t *packed_weight_ = nullptr;
  int8_t *input_ptr_ = nullptr;
  int8_t *output_ptr_ = nullptr;
  int8_t *buffer_ = nullptr;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_

@@ -15,6 +15,7 @@
 */
#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h"
+#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h"
#include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
@@ -130,7 +131,7 @@ int ConvDwInt8Run(void *cdata, int task_id) {
int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() {
  int output_row_size = conv_param_->thread_num_ * conv_param_->output_w_ * conv_param_->output_channel_;
-  row_buffer_ = reinterpret_cast<int32_t *>(context_->allocator->Malloc(output_row_size * sizeof(float)));
+  row_buffer_ = reinterpret_cast<int32_t *>(context_->allocator->Malloc(output_row_size * sizeof(int)));
  if (row_buffer_ == nullptr) {
    MS_LOG(ERROR) << "Malloc buffer failed.";
    return RET_ERROR;
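
The sizeof(float) -> sizeof(int) fix above is benign on most targets, since both types are 4 bytes, but the size expression should name the buffer's actual element type. A minimal sketch of the safer idiom, with plain malloc standing in for the allocator:

#include <cstdint>
#include <cstdlib>

int main() {
  int output_row_size = 1024;  // stand-in for thread_num * output_w * output_channel
  // Tie the allocation size to the pointee type so the two cannot drift apart,
  // which is how the sizeof(float) slip crept in unnoticed.
  int32_t *row_buffer = static_cast<int32_t *>(malloc(output_row_size * sizeof(*row_buffer)));
  free(row_buffer);
  return 0;
}
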
@@ -139,16 +140,11 @@ int ConvolutionDepthwiseInt8CPUKernel::InitBuffer() {
}

int ConvolutionDepthwiseInt8CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";
    return RET_ERROR;
  }
  ret = InitBuffer();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Depthwise int8 ReSize error!";
@@ -177,7 +173,6 @@ kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *>
                                               const mindspore::lite::PrimitiveC *primitive) {
  MS_ASSERT(opParameter != nullptr);
  MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D);
-  kernel::LiteKernel *kernel;
  auto act_quant_size =
      MSMAX(inputs[kInputIndex]->GetQuantParams().size(), outputs[kOutputIndex]->GetQuantParams().size());
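
The creator hunk is truncated here, but with the shared `kernel::LiteKernel *kernel;` declaration removed and the 3x3 header now included, the selection presumably moves to where each branch constructs its kernel directly. A hypothetical sketch of such a dispatch — the kernel class names and CheckIfUse3X3 come from this commit, while the branch conditions and `ctx` plumbing are our assumptions, not the verbatim source:

// Hypothetical continuation of CpuConvDwInt8KernelCreator:
auto conv_param = reinterpret_cast<ConvParameter *>(opParameter);
kernel::LiteKernel *kernel = nullptr;
if (act_quant_size == 1) {  // per-tensor activation quant (assumed condition)
  if (CheckIfUse3X3(conv_param, conv_param->input_channel_)) {
    // route qualifying 3x3 shapes to the new specialized kernel
    kernel = new (std::nothrow)
        kernel::ConvolutionDepthwise3x3Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
  } else {
    kernel = new (std::nothrow)
        kernel::ConvolutionDepthwiseInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
  }
} else {  // per-channel activation quant falls back to the sliding-window kernel
  kernel = new (std::nothrow)
      kernel::ConvolutionDepthwiseSWInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
}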

@@ -29,9 +29,9 @@ using mindspore::schema::PrimitiveType_DepthwiseConv2D;
namespace mindspore::kernel {
ConvolutionDepthwiseSWInt8CPUKernel::~ConvolutionDepthwiseSWInt8CPUKernel() {
-  if (sliding != nullptr) {
-    delete sliding;
-    sliding = nullptr;
+  if (sliding_ != nullptr) {
+    delete sliding_;
+    sliding_ = nullptr;
  }
  if (packed_weight_ != nullptr) {
    free(packed_weight_);
@@ -270,8 +270,8 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReinitQuantParam() {
}

int ConvolutionDepthwiseSWInt8CPUKernel::Init() {
-  sliding = new (std::nothrow) SlidingWindowParam;
-  if (sliding == nullptr) {
+  sliding_ = new (std::nothrow) SlidingWindowParam;
+  if (sliding_ == nullptr) {
    MS_LOG(ERROR) << "new sliding window param.";
    return RET_ERROR;
  }
@@ -283,7 +283,7 @@
int ConvolutionDepthwiseSWInt8CPUKernel::ReSize() {
  ConvolutionBaseCPUKernel::Init();
-  InitSlidingParamConvDw(sliding, conv_param_, C8NUM);
+  InitSlidingParamConvDw(sliding_, conv_param_, C8NUM);
  auto ret = ConvolutionBaseCPUKernel::SetQuantParam();
  if (ret != RET_OK) {
@@ -306,7 +306,7 @@
int ConvolutionDepthwiseSWInt8CPUKernel::Execute(int task_id) {
  ConvDwSWInt8(packed_output_, packed_input_, packed_weight_, reinterpret_cast<int32_t *>(bias_data_), input_zp_,
-              output_zp_, conv_param_, sliding, task_id);
+              output_zp_, conv_param_, sliding_, task_id);
  return RET_OK;
}
@@ -321,10 +321,6 @@ int ConvDwSWInt8Run(void *cdata, int task_id) {
}

int ConvolutionDepthwiseSWInt8CPUKernel::Run() {
-  if (conv_param_->input_channel_ != conv_param_->output_channel_) {
-    MS_LOG(ERROR) << "Only support input channel equals output channel.";
-    return RET_ERROR;
-  }
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed.";

@@ -44,7 +44,7 @@ class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel {
  int ReinitFreeBefore();
  void FreeTmpQuant();

-  SlidingWindowParam *sliding = nullptr;
+  SlidingWindowParam *sliding_ = nullptr;
  int16_t *packed_weight_ = nullptr;
  int8_t *packed_input_ = nullptr;
  int8_t *packed_output_ = nullptr;
