!6966 [MS][LITE] optimize winograd input transform func

Merge pull request !6966 from fuzhiye/tmp
pull/6966/MERGE
mindspore-ci-bot 5 years ago committed by Gitee
commit 56c3d2219e

@ -77,7 +77,6 @@ void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_
int input_unit = conv_param->input_unit_;
int in_batch = conv_param->input_batch_;
int in_channel = conv_param->input_channel_;
int ic4 = UP_DIV(in_channel, C4NUM);
int out_unit = conv_param->output_unit_;
int out_w_block = UP_DIV(conv_param->output_w_, out_unit);
int out_h_block = UP_DIV(conv_param->output_h_, out_unit);
@ -96,10 +95,10 @@ void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_
float *gemm_out = buffer_list[1];
float *tmp_data = buffer_list[2];
float *col_buffer = buffer_list[3];
int trans_input_offset = tile_num * input_unit_square * ic4 * C4NUM;
int trans_input_offset = tile_num * input_unit_square * in_channel;
int gemm_out_offset = tile_num * input_unit_square * oc8 * C8NUM;
int tmp_data_offset = input_unit_square * C4NUM;
int col_buffer_offset = tile_num * ic4 * C4NUM;
int col_buffer_offset = tile_num * in_channel;
// step 1 : filter transform (pre-processed offline)
// step 2 : input transform (online)
for (int b = 0; b < in_batch; b++) {
@ -107,7 +106,7 @@ void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_
int out_batch_offset = b * out_channel * conv_param->output_w_ * conv_param->output_h_;
for (int thread_id = task_id; thread_id < output_tile_count; thread_id += thread_num) {
int out_tile_index = thread_id * tile_num;
int cal_num = output_count - thread_id * tile_num;
int cal_num = output_count - out_tile_index;
cal_num = cal_num > tile_num ? tile_num : cal_num;
WinogradInputTransform(input_data + in_batch_offset, trans_input + task_id * trans_input_offset,
tmp_data + task_id * tmp_data_offset, cal_num, out_tile_index, out_w_block, conv_param,
@ -118,11 +117,11 @@ void ConvWinogardFp32(float *input_data, float *trans_weight, const float *bias_
float *tmp_col_ptr = col_buffer + task_id * col_buffer_offset;
for (int i = 0; i < input_unit_square; ++i) {
#ifdef ENABLE_ARM32
RowMajor2Col4Major(src_ptr + i * C4NUM * ic4 * C4NUM, tmp_col_ptr, C4NUM, ic4 * C4NUM);
RowMajor2Col4Major(src_ptr + i * C4NUM * in_channel, tmp_col_ptr, C4NUM, in_channel);
#else
RowMajor2Col12Major(src_ptr + i * C12NUM * ic4 * C4NUM, tmp_col_ptr, C12NUM, ic4 * C4NUM);
RowMajor2Col12Major(src_ptr + i * C12NUM * in_channel, tmp_col_ptr, C12NUM, in_channel);
#endif
MatMulOpt(tmp_col_ptr, trans_weight + i * ic4 * C4NUM * oc8 * C8NUM, dst_ptr + i * C8NUM, NULL, 0, ic4 * C4NUM,
MatMulOpt(tmp_col_ptr, trans_weight + i * in_channel * oc8 * C8NUM, dst_ptr + i * C8NUM, NULL, 0, in_channel,
cal_num, oc8 * C8NUM, input_unit_square, 2);
}

File diff suppressed because it is too large. [Load Diff]

@ -46,11 +46,6 @@ void Pack1x1WeightFp32(const float *weight_data, float *packed_weight, ConvParam
void PackInputSum16x4Int8(const int8_t *input, int32_t *input_sum, int32_t *filter_zp, ConvParameter *conv_param);
void PackInputSum8x4Int8(const int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
size_t plane_size, ConvParameter *conv_param);
void MatrixPack(const float *src, float *dst, int row, int ic4, int stride);
void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvParameter *conv_param);
void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel);
@ -75,20 +70,10 @@ void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int ch
void PackNHWC4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNCHWToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCReluFp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCRelu6Fp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCPreluFp32(const void *src, void *dst, const void *slope, int batch, int plane, int channel);
void PackNC4HW4ToNCHWFp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNHWCToC8HWN8Fp32(const void *src, void *dst, int batch, int plane, int channel);
void PackNHWCToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel);
@ -99,18 +84,10 @@ void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int c
void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
void PackNCHWToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
void PackNC4HW4ToNCHWInt8(const void *src, void *dst, int batch, int plane, int channel);
void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel);
void PackNHWCToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel);
void PackNCHWToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel);
void PackDepthwiseInt8Input(const int8_t *src, int16_t *dst, const ConvParameter *conv_param);

@ -42,7 +42,7 @@ void WinogradInputTransform(const float *input_data, float *trans_input, float *
int interval_y_e = src_y_e < input_h ? input_unit : (input_h - src_y_s);
int src_plane_offset = in_channel * (src_y_s * input_w + src_x_s);
int dst_plane_offset = c * C4NUM * ic4;
int dst_plane_offset = c * in_channel;
for (int ic = 0; ic < ic4; ic++) {
// clear tmp buffer
memset(tmp_data, 0, input_unit * input_unit * C4NUM * sizeof(float));
@ -91,9 +91,9 @@ void WinogradInputTransform(const float *input_data, float *trans_input, float *
const int tile_num = 12;
#endif
int dst_ic4_offset = dst_plane_offset + ic * C4NUM;
size_t dst_step = tile_num * ic4 * C4NUM;
size_t dst_step = tile_num * in_channel;
float *trans_input_ptr = trans_input + dst_ic4_offset;
func(tmp_data, trans_input_ptr, C4NUM, dst_step);
func(tmp_data, trans_input_ptr, C4NUM, dst_step, real_c);
}
out_tile_index++;
} // cal_tile_num loop

File diff suppressed because it is too large. [Load Diff]

@ -28,7 +28,7 @@
#ifdef __cplusplus
extern "C" {
#endif
typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_step, int dst_step);
typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c);
typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step,
int dst_step, int out_c, int r_w, int r_h, int r_c);
@ -163,11 +163,11 @@ void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const fl
InputTransFunc GetInputTransFunc(int input_unit);
void InputTransform4x4Unit(const float *src_data, float *dst_data, int src_step, int dst_step);
void InputTransform4x4Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c);
void InputTransform6x6Unit(const float *src_data, float *dst_data, int src_step, int dst_step);
void InputTransform6x6Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c);
void InputTransform8x8Unit(const float *src_data, float *dst_data, int src_step, int dst_step);
void InputTransform8x8Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c);
OutputTransFunc GetOutputTransFunc(int input_unit, int output_unit, ActType act_type);

@ -39,21 +39,18 @@ int ConvolutionWinogradCPUKernel::WinogradFilterTransform(const float *weight_da
// original weight format : ohwi
auto channel_in = conv_param_->input_channel_;
auto channel_out = conv_param_->output_channel_;
int ic4 = UP_DIV(channel_in, C4NUM);
int oc_block_num = UP_DIV(channel_out, oc_block);
int c4_channel = ic4 * C4NUM;
int block_stride = c4_channel * oc_block;
int block_stride = channel_in * oc_block;
int block_num_stride = block_stride * oc_block_num;
// trans_filter = G*g*GT (g represents weight_data)
// separate into two steps ===> tmp = (g * GT)T ===> trans = (tmp * GT)T use same function:MatrixMultiplyWinograd
auto tmp_data = reinterpret_cast<float *>(malloc(c4_channel * input_unit_ * kernel_unit_ * sizeof(float)));
auto tmp_data = reinterpret_cast<float *>(malloc(channel_in * input_unit_ * kernel_unit_ * sizeof(float)));
if (tmp_data == nullptr) {
MS_LOG(ERROR) << "malloc tmp_data failed.";
return RET_MEMORY_FAILED;
}
memset(tmp_data, 0, c4_channel * input_unit_ * kernel_unit_ * sizeof(float));
auto trans_out_data = reinterpret_cast<float *>(malloc(c4_channel * input_unit_ * input_unit_ * sizeof(float)));
auto trans_out_data = reinterpret_cast<float *>(malloc(channel_in * input_unit_ * input_unit_ * sizeof(float)));
if (trans_out_data == nullptr) {
free(tmp_data);
MS_LOG(ERROR) << "malloc trans_out_data failed.";
@ -61,14 +58,14 @@ int ConvolutionWinogradCPUKernel::WinogradFilterTransform(const float *weight_da
}
#ifndef ENABLE_ARM64
auto tmp_data1 = reinterpret_cast<float *>(malloc(c4_channel * input_unit_ * kernel_unit_ * sizeof(float)));
auto tmp_data1 = reinterpret_cast<float *>(malloc(channel_in * input_unit_ * kernel_unit_ * sizeof(float)));
if (tmp_data1 == nullptr) {
free(tmp_data);
free(trans_out_data);
MS_LOG(ERROR) << "malloc tmp_data1 failed.";
return RET_MEMORY_FAILED;
}
auto trans_out_data1 = reinterpret_cast<float *>(malloc(c4_channel * input_unit_ * input_unit_ * sizeof(float)));
auto trans_out_data1 = reinterpret_cast<float *>(malloc(channel_in * input_unit_ * input_unit_ * sizeof(float)));
if (trans_out_data1 == nullptr) {
free(tmp_data);
free(tmp_data1);
@ -87,30 +84,30 @@ int ConvolutionWinogradCPUKernel::WinogradFilterTransform(const float *weight_da
#ifndef ENABLE_ARM64
// tmp_data = g * GT
MatrixMultiplyWinograd(weight_data + i * input_oz_offset, matrix_gt, tmp_data, kernel_unit_, kernel_unit_,
input_unit_, channel_in, c4_channel * 4);
input_unit_, channel_in, channel_in * 4);
// tmp_data1 = (tmp_data)T
PackHWCToWHC(tmp_data, tmp_data1, kernel_unit_, input_unit_, c4_channel);
PackHWCToWHC(tmp_data, tmp_data1, kernel_unit_, input_unit_, channel_in);
// trans_out_data1 = tmp * GT
MatrixMultiplyWinograd(tmp_data1, matrix_gt, trans_out_data1, input_unit_, kernel_unit_, input_unit_, c4_channel,
c4_channel * 4);
MatrixMultiplyWinograd(tmp_data1, matrix_gt, trans_out_data1, input_unit_, kernel_unit_, input_unit_, channel_in,
channel_in * 4);
// trans_out_data = (trans_out_data1)T
PackHWCToWHC(trans_out_data1, trans_out_data, input_unit_, input_unit_, c4_channel);
PackHWCToWHC(trans_out_data1, trans_out_data, input_unit_, input_unit_, channel_in);
#else
// tmp = (g * GT)T
MatrixMultiplyWinograd(weight_data + i * input_oz_offset, matrix_gt, tmp_data, kernel_unit_, kernel_unit_,
input_unit_, channel_in, c4_channel * 4);
input_unit_, channel_in, channel_in * 4);
// trans = (tmp * GT)T
MatrixMultiplyWinograd(tmp_data, matrix_gt, trans_out_data, input_unit_, kernel_unit_, input_unit_, c4_channel,
c4_channel * 4);
MatrixMultiplyWinograd(tmp_data, matrix_gt, trans_out_data, input_unit_, kernel_unit_, input_unit_, channel_in,
channel_in * 4);
#endif
int in_offset = 0;
for (int j = 0; j < input_unit_; ++j) {
for (int k = 0; k < input_unit_; ++k) {
for (int c = 0; c < c4_channel; ++c) {
for (int c = 0; c < channel_in; ++c) {
*(trans_weight_ + output_oz_offset + c * oc_block) = trans_out_data[in_offset + c];
}
in_offset += c4_channel;
in_offset += channel_in;
output_oz_offset += block_num_stride;
}
}
@ -128,7 +125,6 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
auto filter_tensor = in_tensors_.at(kWeightIndex);
int in_channel = filter_tensor->Channel();
int out_channel = filter_tensor->Batch();
int ic4 = UP_DIV(in_channel, C4NUM);
conv_param_->input_channel_ = in_channel;
conv_param_->output_channel_ = out_channel;
@ -137,7 +133,7 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
int oc_block_num = UP_DIV(out_channel, C8NUM);
// set data
auto trans_matrix_data_size = input_unit_ * input_unit_ * ic4 * C4NUM * oc_block_num * oc_block * sizeof(float);
auto trans_matrix_data_size = input_unit_ * input_unit_ * in_channel * oc_block_num * oc_block * sizeof(float);
trans_weight_ = reinterpret_cast<float *>(malloc(trans_matrix_data_size));
if (trans_weight_ == nullptr) {
MS_LOG(ERROR) << "malloc matrix_buffer failed.";
@ -188,7 +184,6 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
int ConvolutionWinogradCPUKernel::InitTmpBuffer() {
int channel_out = conv_param_->output_channel_;
int oc8 = UP_DIV(channel_out, C8NUM);
int ic4 = UP_DIV(conv_param_->input_channel_, C4NUM);
#ifdef ENABLE_ARM32
int tile_num = 4;
#else
@ -196,7 +191,8 @@ int ConvolutionWinogradCPUKernel::InitTmpBuffer() {
#endif
MS_ASSERT(ctx_->allocator != nullptr);
size_t tile_buffer_size = thread_count_ * tile_num * input_unit_ * input_unit_ * ic4 * C4NUM * sizeof(float);
size_t tile_buffer_size =
thread_count_ * tile_num * input_unit_ * input_unit_ * conv_param_->input_channel_ * sizeof(float);
trans_input_ = reinterpret_cast<float *>(ctx_->allocator->Malloc(tile_buffer_size));
if (trans_input_ == nullptr) {
MS_LOG(ERROR) << "malloc trans_input_ failed.";
@ -217,8 +213,8 @@ int ConvolutionWinogradCPUKernel::InitTmpBuffer() {
return RET_MEMORY_FAILED;
}
col_buffer_ =
reinterpret_cast<float *>(ctx_->allocator->Malloc(thread_count_ * tile_num * ic4 * C4NUM * sizeof(float)));
col_buffer_ = reinterpret_cast<float *>(
ctx_->allocator->Malloc(thread_count_ * tile_num * conv_param_->input_channel_ * sizeof(float)));
if (col_buffer_ == nullptr) {
MS_LOG(ERROR) << "malloc col_buffer_ failed.";
return RET_ERROR;

Loading…
Cancel
Save