!4094 [MS][LITE] add arm fp32 op: conv depthwise 3x3, add testcase for conv depthwise
Merge pull request !4094 from yangruoqi713/conv_dw_3x3pull/4094/MERGE
commit
8d4df847e5
@ -0,0 +1,199 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "src/runtime/kernel/arm/fp32/convolution_depthwise_3x3.h"
|
||||
#include "schema/model_generated.h"
|
||||
#include "src/kernel_registry.h"
|
||||
#include "include/errorcode.h"
|
||||
#include "src/runtime/runtime_api.h"
|
||||
|
||||
using mindspore::kernel::KERNEL_ARCH::kCPU;
|
||||
using mindspore::lite::KernelRegistrar;
|
||||
using mindspore::lite::RET_ERROR;
|
||||
using mindspore::lite::RET_OK;
|
||||
using mindspore::schema::PrimitiveType_DepthwiseConv2D;
|
||||
|
||||
namespace mindspore::kernel {
|
||||
int ConvolutionDepthwise3x3CPUKernel::InitWeightBias() {
|
||||
// init weight: o, h, w, i; o == group, i == 1
|
||||
auto weight_tensor = inputs_[kWeightIndex];
|
||||
auto origin_weight = reinterpret_cast<float *>(weight_tensor->Data());
|
||||
// o h w 1 -> o/4 h w 1 4
|
||||
int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM);
|
||||
int weight_c4_size = OC4 * C4NUM * 9;
|
||||
auto tmp_weight = reinterpret_cast<float *>(malloc(weight_c4_size * sizeof(float)));
|
||||
if (tmp_weight == nullptr) {
|
||||
MS_LOG(ERROR) << "Malloc buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
memset(tmp_weight, 0, weight_c4_size * sizeof(float));
|
||||
PackNCHWToNC4HW4Fp32(origin_weight, tmp_weight, 1, conv_param_->kernel_h_ * conv_param_->kernel_w_,
|
||||
conv_param_->output_channel_);
|
||||
|
||||
// weight transform
|
||||
int packed_weight_size = OC4 * C4NUM * 16;
|
||||
packed_weight_ = reinterpret_cast<float *>(malloc(packed_weight_size * sizeof(float)));
|
||||
if (packed_weight_ == nullptr) {
|
||||
MS_LOG(ERROR) << "Malloc buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
memset(packed_weight_, 0, packed_weight_size * sizeof(float));
|
||||
ConvDw3x3Fp32FilterTrans(packed_weight_, tmp_weight, OC4);
|
||||
|
||||
// init bias
|
||||
bias_data_ = reinterpret_cast<float *>(malloc(C4NUM * OC4 * sizeof(float)));
|
||||
if (bias_data_ == nullptr) {
|
||||
MS_LOG(ERROR) << "Malloc buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
memset(bias_data_, 0, C4NUM * OC4 * sizeof(float));
|
||||
if (inputs_.size() == kInputSize2) {
|
||||
auto ori_bias = reinterpret_cast<float *>(inputs_.at(kBiasIndex)->Data());
|
||||
memcpy(bias_data_, ori_bias, conv_param_->output_channel_ * sizeof(float));
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvolutionDepthwise3x3CPUKernel::InitBuffer() {
|
||||
if (conv_param_->input_channel_ % C4NUM != 0) {
|
||||
need_align_ = true;
|
||||
int IC4 = UP_DIV(conv_param_->input_channel_, C4NUM);
|
||||
int pack_input_size = conv_param_->input_batch_ * conv_param_->input_h_ * conv_param_->input_w_ * C4NUM * IC4;
|
||||
packed_input_ = reinterpret_cast<float *>(malloc(pack_input_size * sizeof(float)));
|
||||
if (packed_input_ == nullptr) {
|
||||
MS_LOG(ERROR) << "Malloc buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
memset(packed_input_, 0, pack_input_size * sizeof(float));
|
||||
|
||||
int OC4 = UP_DIV(conv_param_->output_channel_, C4NUM);
|
||||
int pack_output_size = conv_param_->output_batch_ * conv_param_->output_h_ * conv_param_->output_w_ * C4NUM * OC4;
|
||||
packed_output_ = reinterpret_cast<float *>(malloc(pack_output_size * sizeof(float)));
|
||||
if (packed_output_ == nullptr) {
|
||||
MS_LOG(ERROR) << "Malloc buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
// malloc transform buffer
|
||||
trans_size_ = UP_DIV(conv_param_->output_w_, 2) * UP_DIV(conv_param_->output_h_, 2) * 16 * C4NUM;
|
||||
size_t trans_buffer_size = thread_count_ * trans_size_ * sizeof(float);
|
||||
trans_buffer_ = reinterpret_cast<float *>(malloc(trans_buffer_size));
|
||||
if (trans_buffer_ == nullptr) {
|
||||
MS_LOG(ERROR) << "malloc trans buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvolutionDepthwise3x3CPUKernel::Init() {
|
||||
// conv base init
|
||||
ConvolutionBaseCPUKernel::Init();
|
||||
|
||||
auto ret = InitWeightBias();
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "Depthwise3x3 fp32 initWeightBias error!";
|
||||
return ret;
|
||||
}
|
||||
|
||||
// init threadNum;
|
||||
conv_param_->thread_num_ = MSMIN(thread_count_, UP_DIV(conv_param_->output_channel_, C4NUM));
|
||||
|
||||
ret = InitBuffer();
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "Depthwise3x3 fp32 initBuffer error!";
|
||||
return ret;
|
||||
}
|
||||
|
||||
// malloc one block buffer
|
||||
block_buffer_ = reinterpret_cast<float *>(malloc(thread_count_ * 16 * C4NUM * sizeof(float)));
|
||||
if (block_buffer_ == nullptr) {
|
||||
MS_LOG(ERROR) << "malloc block buffer failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvolutionDepthwise3x3CPUKernel::ReSize() {
|
||||
if (need_align_) {
|
||||
free(packed_input_);
|
||||
free(packed_output_);
|
||||
}
|
||||
free(trans_buffer_);
|
||||
|
||||
// conv base init
|
||||
ConvolutionBaseCPUKernel::Init();
|
||||
|
||||
auto ret = InitBuffer();
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "Depthwise3x3 fp32 initBuffer error!";
|
||||
return ret;
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvolutionDepthwise3x3CPUKernel::Execute(int task_id) {
|
||||
auto trans_buf = trans_buffer_ + task_id * trans_size_;
|
||||
auto block_buf = block_buffer_ + task_id * 16 * C4NUM;
|
||||
ConvDw3x3Fp32(packed_output_, packed_input_, packed_weight_, reinterpret_cast<float *>(bias_data_), trans_buf,
|
||||
block_buf, conv_param_, task_id);
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvDw3x3Run(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
|
||||
auto conv_dw_3x3 = reinterpret_cast<ConvolutionDepthwise3x3CPUKernel *>(cdata);
|
||||
auto ret = conv_dw_3x3->Execute(task_id);
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "ConvolutionDepthwise3x3Run error task_id[" << task_id << "] error_code[" << ret << "]";
|
||||
return RET_ERROR;
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ConvolutionDepthwise3x3CPUKernel::Run() {
|
||||
if (conv_param_->input_channel_ != conv_param_->output_channel_) {
|
||||
MS_LOG(ERROR) << "Only support input channel equals output channel.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
auto input_tensor = inputs_.at(kInputIndex);
|
||||
auto input_addr = reinterpret_cast<float *>(input_tensor->Data());
|
||||
|
||||
// pack input: to nhwc4
|
||||
if (need_align_) {
|
||||
PackNHWCToNHWC4Fp32(input_addr, packed_input_, conv_param_->input_batch_,
|
||||
conv_param_->input_h_ * conv_param_->input_w_, conv_param_->input_channel_);
|
||||
} else {
|
||||
packed_input_ = input_addr;
|
||||
}
|
||||
|
||||
auto output_addr = reinterpret_cast<float *>(outputs_.at(kOutputIndex)->Data());
|
||||
if (!need_align_) {
|
||||
packed_output_ = output_addr;
|
||||
}
|
||||
|
||||
auto ret = LiteBackendParallelLaunch(ConvDw3x3Run, this, conv_param_->thread_num_);
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "ConvDw3x3Run error: error_code[" << ret << "]";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
if (need_align_) {
|
||||
PackNHWC4ToNHWCFp32(packed_output_, output_addr, conv_param_->output_batch_,
|
||||
conv_param_->output_h_ * conv_param_->output_w_, conv_param_->output_channel_);
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
} // namespace mindspore::kernel
|
@ -0,0 +1,61 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
|
||||
#define MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
|
||||
|
||||
#include <vector>
|
||||
#include "src/lite_kernel.h"
|
||||
#include "src/runtime/kernel/arm/base/convolution_base.h"
|
||||
#include "src/runtime/kernel/arm/nnacl/fp32/conv_depthwise.h"
|
||||
|
||||
namespace mindspore::kernel {
|
||||
class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel {
|
||||
public:
|
||||
ConvolutionDepthwise3x3CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
|
||||
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
|
||||
: ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {}
|
||||
|
||||
~ConvolutionDepthwise3x3CPUKernel() override {
|
||||
free(packed_weight_);
|
||||
if (need_align_) {
|
||||
free(packed_input_);
|
||||
free(packed_output_);
|
||||
}
|
||||
free(block_buffer_);
|
||||
free(trans_buffer_);
|
||||
};
|
||||
|
||||
int Init() override;
|
||||
int ReSize() override;
|
||||
int Run() override;
|
||||
|
||||
int InitWeightBias();
|
||||
int InitBuffer();
|
||||
int Execute(int task_id);
|
||||
|
||||
private:
|
||||
float *packed_weight_;
|
||||
float *packed_input_;
|
||||
float *packed_output_;
|
||||
float *block_buffer_;
|
||||
float *trans_buffer_;
|
||||
int trans_size_;
|
||||
bool need_align_ = false;
|
||||
};
|
||||
} // namespace mindspore::kernel
|
||||
|
||||
#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_CONVOLUTION_DEPTHWISE_3X3_H_
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,198 @@
|
||||
/**
|
||||
* Copyright 2020 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include "utils/log_adapter.h"
|
||||
#include "common/common_test.h"
|
||||
#include "src/common/file_utils.h"
|
||||
#include "mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h"
|
||||
#include "mindspore/lite/src/kernel_registry.h"
|
||||
#include "mindspore/lite/src/ops/ops.h"
|
||||
|
||||
namespace mindspore {
|
||||
// Test fixture for the fp32 depthwise-convolution kernel tests.
class TestConvolutionDwFp32 : public mindspore::Common {
 public:
  TestConvolutionDwFp32() = default;
};
|
||||
|
||||
// Fills conv_param with a fixed 1x288x288x25 depthwise configuration:
// 3x3 kernel, stride 1, dilation 1, pad 1 — so output shape == input shape.
// Channel count 25 deliberately exercises the non-C4-aligned path.
void InitConvDwParam(ConvParameter *conv_param) {
  conv_param->input_batch_ = 1;
  conv_param->input_h_ = 288;
  conv_param->input_w_ = 288;
  conv_param->input_channel_ = 25;

  conv_param->output_batch_ = 1;
  conv_param->output_h_ = 288;
  conv_param->output_w_ = 288;
  conv_param->output_channel_ = 25;

  conv_param->kernel_h_ = 3;
  conv_param->kernel_w_ = 3;
  conv_param->stride_h_ = 1;
  conv_param->stride_w_ = 1;
  conv_param->dilation_h_ = 1;
  conv_param->dilation_w_ = 1;
  conv_param->pad_h_ = 1;
  conv_param->pad_w_ = 1;
}
|
||||
|
||||
// Builds the input/weight/bias tensors (from .bin fixtures) and an output
// tensor sized from conv_param, appending them to *inputs / *outputs.
// Ownership of the created tensors transfers to the caller.
void InitConvDwCreator(std::vector<lite::tensor::Tensor *> *inputs, std::vector<lite::tensor::Tensor *> *outputs,
                       const ConvParameter *conv_param) {
  // prepare input, format NHWC
  size_t input_size;
  std::string input_path = "./test_data/convDw/convDwfp32_input.bin";
  auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
  ASSERT_NE(input_data, nullptr);  // fix: a missing fixture previously crashed in memcpy

  auto *input = new lite::tensor::Tensor;
  input->set_data_type(kNumberTypeFloat32);
  input->SetFormat(schema::Format_NHWC);
  input->set_shape({conv_param->input_batch_, conv_param->input_h_, conv_param->input_w_, conv_param->input_channel_});
  input->MallocData();
  memcpy(input->Data(), input_data, input_size);
  // fix: the file buffer was leaked once copied into the tensor.
  // NOTE(review): assumes ReadFile allocates with new[] — confirm against its implementation.
  delete[] reinterpret_cast<char *>(input_data);

  // prepare weight, format co kh kw ci, ci = 1
  size_t weight_size;
  std::string weight_path = "./test_data/convDw/convDwfp32_weight.bin";
  auto weight_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &weight_size));
  ASSERT_NE(weight_data, nullptr);

  auto *weight = new lite::tensor::Tensor;
  weight->set_data_type(kNumberTypeFloat32);
  weight->set_shape({conv_param->output_channel_, conv_param->kernel_h_, conv_param->kernel_w_, 1});
  weight->MallocData();
  memcpy(weight->Data(), weight_data, weight_size);
  delete[] reinterpret_cast<char *>(weight_data);  // fix: leaked in the original code

  // prepare bias: zero-filled, one value per output channel
  auto *bias = new lite::tensor::Tensor;
  bias->set_data_type(kNumberTypeFloat32);
  bias->set_shape({conv_param->output_channel_});
  bias->MallocData();
  memset(bias->Data(), 0, bias->ElementsNum() * sizeof(float));

  inputs->push_back(input);
  inputs->push_back(weight);
  inputs->push_back(bias);

  // prepare output, format NHWC, zero-initialized
  auto *output = new lite::tensor::Tensor;
  output->set_data_type(kNumberTypeFloat32);
  output->set_shape(
    {conv_param->output_batch_, conv_param->output_h_, conv_param->output_w_, conv_param->output_channel_});
  output->SetFormat(schema::Format_NHWC);
  output->MallocData();
  memset(output->Data(), 0, output->ElementsNum() * sizeof(float));
  outputs->push_back(output);
}
|
||||
|
||||
// Accuracy test: runs the registered fp32 DepthwiseConv2D kernel on fixture
// data and compares the output against a golden .bin within 1e-4 tolerance.
TEST_F(TestConvolutionDwFp32, ConvDwFp32Accuracy) {
  // prepare stage
  auto conv_param = new ConvParameter();
  InitConvDwParam(conv_param);

  // init ctx
  auto ctx = new Context();
  ctx->thread_num_ = 4;

  // init tensor
  std::vector<lite::tensor::Tensor *> inputs;
  std::vector<lite::tensor::Tensor *> outputs;
  InitConvDwCreator(&inputs, &outputs, conv_param);

  // look up the registered kernel creator for fp32 DepthwiseConv2D on CPU
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  // op run
  kernel->Run();

  std::cout << "==================output data=================" << std::endl;
  auto output_ptr = reinterpret_cast<float *>(outputs[0]->Data());
  for (int i = 0; i < 20; i++) {
    std::cout << output_ptr[i] << ", ";
  }
  std::cout << std::endl;

  // read golden output data, format NHWC
  size_t output_size;
  std::string output_path = "./test_data/convDw/convDwfp32_output.bin";
  auto correct_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));
  ASSERT_NE(correct_data, nullptr);  // fix: a missing fixture previously crashed in the compare

  // compare
  CompareOutputData(output_ptr, correct_data, outputs[0]->ElementsNum(), 0.0001);

  delete conv_param;
  for (size_t i = 0; i < inputs.size(); i++) {  // fix: signed/unsigned index comparison
    delete inputs[i];
  }
  for (size_t i = 0; i < outputs.size(); i++) {
    delete outputs[i];
  }
  delete kernel;
  delete ctx;  // fix: ctx was leaked
  // fix: scalar `delete` on an array allocation is undefined behavior.
  // NOTE(review): assumes ReadFile allocates with new[] — confirm.
  delete[] reinterpret_cast<char *>(correct_data);
  MS_LOG(INFO) << "TestConvolutionDwFp32 accuracy passed";
}
|
||||
|
||||
// Performance smoke test: 3 warm-up runs, then 10 timed runs of the fp32
// DepthwiseConv2D kernel; prints the average per-run latency.
TEST_F(TestConvolutionDwFp32, ConvDwFp32Performance) {
  // prepare stage
  auto conv_param = new ConvParameter();
  InitConvDwParam(conv_param);

  // init ctx: single thread so the timing is not scheduler-dependent
  auto ctx = new Context();
  ctx->thread_num_ = 1;

  // init tensor
  std::vector<lite::tensor::Tensor *> inputs;
  std::vector<lite::tensor::Tensor *> outputs;
  InitConvDwCreator(&inputs, &outputs, conv_param);

  // look up the registered kernel creator for fp32 DepthwiseConv2D on CPU
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);

  /* running warm up */
  for (int i = 0; i < 3; i++) {
    kernel->Run();
  }

  /* running time cost */
  int loop_count = 10;
  auto time_start = mindspore::lite::GetTimeUs();
  for (int i = 0; i < loop_count; i++) {
    kernel->Run();
  }
  auto time_end = mindspore::lite::GetTimeUs();
  auto cost = time_end - time_start;
  uint64_t time_avg = cost / loop_count;
  printf("Convolution_depthwise fp32 average time : %f ms\n", time_avg / 1000.0f);

  delete conv_param;
  for (size_t i = 0; i < inputs.size(); i++) {  // fix: signed/unsigned index comparison
    delete inputs[i];
  }
  for (size_t i = 0; i < outputs.size(); i++) {
    delete outputs[i];
  }
  delete kernel;
  delete ctx;  // fix: ctx was leaked
  MS_LOG(INFO) << "TestConvolutionDwFp32 performance passed";
}
|
||||
} // namespace mindspore
|
Loading…
Reference in new issue