[MS][LITE][Develop] conv1x1 int8 weight transpose bug

pull/5113/head
ling 5 years ago
parent a8c5dfae3b
commit 25f29dcf50

File diff suppressed because it is too large.

@@ -54,9 +54,13 @@ void ConvInt8Opt(int8_t *input_data, int8_t *packed_input, int8_t *packed_weight
ConvParameter *conv_param, GEMM_FUNC gemm_func);
// int8 convolution 1x1
void Conv1x1PreOpt(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, size_t input_channel,
size_t output_channel, size_t plane_size, ConvParameter *conv_param);
void Conv1x1Int8(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum,
const int32_t *bias, int row, int col, int deep16, ConvParameter *conv_param,
MATMUL_OPT_R_FUNC matmul_func);
const int32_t *bias, int row, int col, int deep16, ConvParameter *conv_param);
void Conv1x1Int8Opt(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum,
const int32_t *bias, int row, int col, int deep4, ConvParameter *conv_param,
MATMUL_OPT_R_FUNC matmul_func);
// int8 convolution 3x3
void Conv3x3Int8(int16_t *input_data, int16_t *transed_weight, const int32_t *bias_data, int8_t *output_data,

@@ -172,7 +172,7 @@ void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp,
void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16,
bool suppport_opt) {
/* optimize normal -> same layout */
PackInputSum16x4PerLater(src, dst, filter_zp, row4, col16);
PackInputSum16x4PerLayer(src, dst, filter_zp, row4, col16);
return;
}

@@ -36,7 +36,24 @@ void RowMajor2Row4x16MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int co
for (int c = 0; c < col; c++) {
int cd16 = c / C16NUM;
int cm16 = c % C16NUM;
dst_ptr[cd16 * col16 * C4NUM + rd4 * C4NUM * C16NUM + rm4 * C16NUM + cm16] = src_ptr[r * col16 + c];
int dst_index = rd4 * col16 * C4NUM + cd16 * C4NUM * C16NUM + rm4 * C16NUM + cm16;
int src_index = r * col + c;
dst_ptr[dst_index] = src_ptr[src_index];
}
}
}
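
The fix above is in the destination index: the 4-row block index (rd4) now provides the outer stride across the padded width col16, the 16-wide column tiles (cd16) are laid out inside each block, and the source is read with the true column count col instead of col16. A minimal standalone sketch of the corrected mapping (not part of this commit; C4NUM = 4 and C16NUM = 16 are assumed to match the nnacl macros):

/* Corrected row-major -> row4x16-major index mapping, illustrative only. */
#include <stdio.h>
#include <string.h>

#define C4NUM 4
#define C16NUM 16
#define UP_ROUND(x, y) ((((x) + (y) - 1) / (y)) * (y))

static void pack_row4x16(const signed char *src, signed char *dst, int row, int col) {
  int col16 = UP_ROUND(col, C16NUM);
  for (int r = 0; r < row; r++) {
    int rd4 = r / C4NUM, rm4 = r % C4NUM;
    for (int c = 0; c < col; c++) {
      int cd16 = c / C16NUM, cm16 = c % C16NUM;
      /* fixed order: 4-row block, then 16-column tile, then row, then column */
      dst[rd4 * col16 * C4NUM + cd16 * C4NUM * C16NUM + rm4 * C16NUM + cm16] = src[r * col + c];
    }
  }
}

int main(void) {
  enum { ROW = 5, COL = 20 };
  signed char src[ROW * COL];
  signed char dst[UP_ROUND(ROW, C4NUM) * UP_ROUND(COL, C16NUM)];
  for (int i = 0; i < ROW * COL; i++) src[i] = (signed char)i;
  memset(dst, 0, sizeof(dst));
  pack_row4x16(src, dst, ROW, COL);
  /* element (r = 4, c = 17) sits in row-block 1, column-tile 1 of the packed buffer */
  printf("%d\n", dst[1 * 32 * C4NUM + 1 * C4NUM * C16NUM + 0 * C16NUM + 1]);  /* prints 97 = 4*20 + 17 */
  return 0;
}
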
void RowMajor2Row8x4MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col) {
int col4 = UP_ROUND(col, C4NUM);
for (int r = 0; r < row; r++) {
int rd8 = r / C8NUM;
int rm8 = r % C8NUM;
for (int c = 0; c < col; c++) {
int cd4 = c / C4NUM;
int cm4 = c % C4NUM;
int dst_index = rd8 * col4 * C8NUM + cd4 * C8NUM * C4NUM + rm8 * C4NUM + cm4;
int src_index = r * col + c;
dst_ptr[dst_index] = src_ptr[src_index];
}
}
}
@@ -50,6 +67,29 @@ void MatrixPack4x16UnitInt8(int8_t *src, int8_t *dst, int row, int col, int stri
return;
}
void MatrixEmptyInt8(int8_t *dst, int row, int col) {
for (int r = 0; r < row; r++) {
int8_t *dst_r = dst + r * C16NUM;
memset(dst_r, 0, col * sizeof(int8_t));
}
return;
}
void RowMajor2Row4x8MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col) {
/* Row-major to row4x8-major (block row-major) */
int col4 = UP_ROUND(col, C4NUM);
for (int r = 0; r < row; r++) {
int rd8 = r / C8NUM, rm8 = r % C8NUM;
for (int c = 0; c < col; c++) {
int cd4 = c / C4NUM, cm4 = c % C4NUM;
int src_index = r * col + c;
int dst_index = rd8 * col4 * C8NUM + cd4 * C4NUM * C8NUM + rm8 * C4NUM + cm4;
dst_ptr[dst_index] = src_ptr[src_index];
}
}
return;
}
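
The two new pack routines above, RowMajor2Row8x4MajorInt8 for the input rows and RowMajor2Row4x8MajorInt8 for the weight's output channels, apply the same 8-outer/4-inner tiling, so the 8x8 kernel can walk contiguous 4-deep tiles in both operands. A small sketch of that shared index mapping (assumes C4NUM = 4 and C8NUM = 8; not the repository code):

/* Packed index of element (r, d) for A and (c, d) for B; deep4 is the
 * depth rounded up to a multiple of 4.  Illustrative only. */
#include <stdio.h>

#define C4NUM 4
#define C8NUM 8

static int a_index(int r, int d, int deep4) {  /* row8x4-major (packed input) */
  return (r / C8NUM) * deep4 * C8NUM + (d / C4NUM) * C8NUM * C4NUM + (r % C8NUM) * C4NUM + (d % C4NUM);
}
static int b_index(int c, int d, int deep4) {  /* row4x8-major (packed weight) */
  return (c / C8NUM) * deep4 * C8NUM + (d / C4NUM) * C8NUM * C4NUM + (c % C8NUM) * C4NUM + (d % C4NUM);
}

int main(void) {
  int deep4 = 16;  /* e.g. input channel rounded up to a multiple of 4 */
  for (int d = 0; d < 4; d++) {
    printf("a(%d)=%d  b(%d)=%d\n", d, a_index(3, d, deep4), d, b_index(5, d, deep4));
  }
  return 0;  /* indices advance by 1 inside each 4-deep tile for both operands */
}
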
void RowMajor2Row16x4MajorInt8(void *src_ptr, void *dst_ptr, int row, int col) {
/* Row-major to row16x4-major (block row-major) */
int col16 = UP_ROUND(col, C16NUM);
@@ -90,12 +130,15 @@ void RowMajor2Row16x4MajorInt8(void *src_ptr, void *dst_ptr, int row, int col) {
if (col != col_16div) {
MatrixPack4x16UnitInt8(src_r + col_16div, dst_r + col_16div * C4NUM, C4NUM, col_16res, col);
MatrixEmptyInt8(dst_r + col_16div * C4NUM + col_16res, C4NUM, C16NUM - col_16res);
}
src_r += C4NUM * col;
dst_r += C4NUM * col16;
}
if (row != row_4div) {
memset(dst_r, 0, C4NUM * col16);
for (int ci = 0; ci < col_16div; ci += C16NUM) {
MatrixPack4x16UnitInt8(src_r + ci, dst_r + ci * C4NUM, row_4res, C16NUM, col);
}
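
The added memset and MatrixEmptyInt8 calls zero the padded rows and the column remainder of the last block. This matters because the assembly kernels always accumulate over full 4-row by 16-deep tiles, so stale bytes in the padding would leak into the dot products. A tiny standalone check of that point (illustrative only):

/* Full-tile accumulation over zeroed padding equals the true dot product. */
#include <stdio.h>
#include <string.h>

int main(void) {
  signed char a[16], b[16];
  memset(a, 0, sizeof(a));
  memset(b, 0, sizeof(b));
  for (int d = 0; d < 13; d++) { a[d] = (signed char)(d + 1); b[d] = 2; }  /* true depth 13 */
  int acc = 0;
  for (int d = 0; d < 16; d++) acc += a[d] * b[d];  /* kernel-style full 16-deep tile */
  int ref = 0;
  for (int d = 0; d < 13; d++) ref += a[d] * b[d];  /* true 13-deep dot product */
  printf("%d %d\n", acc, ref);  /* equal only because lanes 13..15 are zero */
  return 0;
}
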
@@ -172,6 +215,38 @@ void MatMulInt8_16x4_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row
return;
}
void MatMulInt8_8x8_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4,
size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift,
int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi,
bool per_channel) {
/* row8x4-major * row4x8-major => (int8)row-major */
for (int r = 0; r < row; r++) {
for (int c = 0; c < col; c++) {
int r8div = r / C8NUM, r8mod = r % C8NUM;
int c8div = c / C8NUM, c8mod = c % C8NUM;
size_t ci = r * stride + c;
int32_t value = 0;
for (int d = 0; d < deep_4; d++) {
int d4div = d / C4NUM, d4mod = d % C4NUM;
size_t ai = r8div * deep_4 * C8NUM + d4div * C8NUM * C4NUM + r8mod * C4NUM + d4mod;
size_t bi = c8div * deep_4 * C8NUM + d4div * C8NUM * C4NUM + c8mod * C4NUM + d4mod;
value = value + a[ai] * b[bi];
}
int32_t cur_input_sum = per_channel ? input_sum[c8div * UP_ROUND(row, C8NUM) + r * C8NUM + c8mod] : input_sum[r];
value -= cur_input_sum;
value += bias[c];
int32_t cur_left_shift = per_channel ? left_shift[c] : left_shift[0];
int32_t cur_right_shift = per_channel ? right_shift[c] : right_shift[0];
int32_t cur_multiplier = per_channel ? multiplier[c] : multiplier[0];
value = MultiplyByQuantizedMultiplier(value, cur_multiplier, cur_left_shift, cur_right_shift) + output_zp;
value = MSMIN(maxi, value);
value = MSMAX(mini, value);
dst[ci] = (int8_t)value;
}
}
return;
}
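
Per output element, the new C fallback accumulates the int8 products over the 4-padded depth, subtracts the precomputed input-sum correction, adds the bias, requantizes, and clamps to [mini, maxi]. The condensed sketch below mirrors that pipeline but uses a plain floating-point scale in place of the fixed-point MultiplyByQuantizedMultiplier, purely for illustration; the names and values are assumptions, not the nnacl API.

/* Illustrative int8 GEMM epilogue: accumulate, subtract the filter-zero-point
 * correction, add bias, rescale, clamp.  Float scale used for clarity only. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static int8_t requantize(int32_t acc, int32_t input_sum, int32_t bias, float scale,
                         int32_t out_zp, int32_t mini, int32_t maxi) {
  int32_t v = acc - input_sum + bias;
  v = (int32_t)lroundf((float)v * scale) + out_zp;
  if (v > maxi) v = maxi;
  if (v < mini) v = mini;
  return (int8_t)v;
}

int main(void) {
  /* one output element, deep = 4 */
  const int8_t a[4] = {10, -3, 7, 2};
  const int8_t w[4] = {5, 5, 5, 5};  /* quantized weights, zero point 5 */
  const int32_t w_zp = 5;
  int32_t acc = 0, a_sum = 0;
  for (int d = 0; d < 4; d++) { acc += a[d] * w[d]; a_sum += a[d]; }
  int8_t out = requantize(acc, a_sum * w_zp, /*bias=*/0, /*scale=*/0.05f,
                          /*out_zp=*/0, /*mini=*/-128, /*maxi=*/127);
  printf("%d\n", out);  /* weights equal to their zero point -> output 0 */
  return 0;
}
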
/* row4x16-major * col16x4-major => row4x4-major */
void MatmulInt8(const int8_t *a, const int8_t *b, int8_t *dst, const int *a_sums, const int *bias, int act_min,
int act_max, int out_zp, int multiplier, int left_shift, int right_shift, int row, int col, int deep16,

@@ -35,6 +35,13 @@ void RowMajor2Row4x16MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int co
void RowMajor2Col8MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col);
void RowMajor2Row16x4MajorInt8(void *src_ptr, void *dst_ptr, int row, int col);
void MatMulInt8_8x8_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4,
size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift,
int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi,
bool per_channel);
void RowMajor2Row8x4MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col);
void RowMajor2Row4x8MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col);
void RowMajor2Row4x16Major(int8_t *src, int row, int col, int8_t *dst, int col_16);
void RowMajor2Col16x4Major(int8_t *src, int row, int col, int8_t *dst, int row_16);
void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, DataOrder order);

@@ -22,7 +22,7 @@
typedef void (*MATMUL_OPT_R4_FUNC)(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16,
const int *input_sum, const int *bias);
typedef void (*MATMUL_OPT_R_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_16,
typedef void (*MATMUL_OPT_R_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4,
size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift,
int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini,
int32_t maxi, bool per_channel);
@@ -35,11 +35,15 @@ typedef struct MatMulParameter {
OpParameter op_parameter_;
int row_;
int col_;
int row_4_;
int row_8_;
int row_12_;
int row_16_;
int col_4_;
int col_8_;
int deep_;
int deep_4_;
int deep_16_;
bool has_bias_;
int batch;
bool a_transpose_; /* false : row-major */

@@ -37,7 +37,7 @@ void IndirectGemmInt8_optimize_handler(int8_t *dst, const int8_t *src, const int
size_t ksize, size_t ic4, size_t output_channel, size_t offset,
const int32_t *input_sum, size_t act_min, size_t act_max, size_t out_zp,
int32_t *out_multiplier, int32_t *shift_before, int32_t *shift_after,
size_t asymmetric, size_t per_channel) {
size_t asymmetric, size_t per_channel) {
return IndirectGemmInt8_24x4_dp(dst, src, weight, bias, ksize, ic4, output_channel, offset, input_sum, act_min,
act_max, out_zp, out_multiplier, shift_before, shift_after, asymmetric, per_channel);
}
@@ -47,7 +47,7 @@ void MatMulR4Int8_optimize_handler(const int8_t *a, const int8_t *b, int *dst, i
return MatMulOptR4Int8Neon64(a, b, dst, row4, col4, deep16, input_sum, bias);
}
void MatMulRInt8_optimize_handler(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_16,
void MatMulRInt8_optimize_handler(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4,
size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift,
int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini,
int32_t maxi, bool per_channel) {

@@ -194,7 +194,7 @@ void Pack1x1WeightFp32(const float *weight_data, float *packed_weight, ConvParam
return;
}
void PackInputSum16x4PerLater(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16) {
void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16) {
/* optimize normal -> same layout */
#ifdef ENABLE_ARM64
asm volatile(
@@ -267,12 +267,12 @@ void PackInputSum16x4PerLater(const int8_t *src, int32_t *dst, int32_t filter_zp
return;
}
void PackInputSum16x4Int8(int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
void PackInputSum16x4Int8(const int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
size_t plane_size, ConvParameter *conv_param) {
size_t hw4 = UP_ROUND(plane_size, C4NUM);
size_t ic16 = UP_ROUND(input_channel, C16NUM);
if (conv_param->conv_quant_arg_.filter_arg_num_ == 1) {
PackInputSum16x4PerLater(input_value, input_sum, conv_param->conv_quant_arg_.filter_quant_args_[0].zp_, hw4, ic16);
PackInputSum16x4PerLayer(input_value, input_sum, conv_param->conv_quant_arg_.filter_quant_args_[0].zp_, hw4, ic16);
} else {
for (int ri = 0; ri < plane_size; ri++) {
int ri4div = ri / C4NUM, ri4mod = ri % C4NUM;
@@ -293,6 +293,40 @@ void PackInputSum16x4Int8(int8_t *input_value, int32_t *input_sum, size_t input_
return;
}
void PackInputSum8x4Int8(const int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
size_t plane_size, ConvParameter *conv_param) {
size_t hw8 = UP_ROUND(plane_size, C8NUM);
size_t ic4 = UP_ROUND(input_channel, C4NUM);
if (conv_param->conv_quant_arg_.filter_arg_num_ == 1) {
for (int r = 0; r < hw8; r++) {
int32_t tmp_value = 0;
for (int c = 0; c < ic4; c++) {
int r8div = r / C8NUM, r8mod = r % C8NUM, c4div = c / C4NUM, c4mod = c % C4NUM;
int src_index = r8div * C8NUM * ic4 + c4div * C8NUM * C4NUM + r8mod * C4NUM + c4mod;
tmp_value += input_value[src_index];
}
input_sum[r] = tmp_value * conv_param->conv_quant_arg_.filter_quant_args_[0].zp_;
}
} else {
for (int ri = 0; ri < plane_size; ri++) {
int ri8div = ri / C8NUM, ri8mod = ri % C8NUM;
for (int ci = 0; ci < output_channel; ci++) {
int32_t tmp_sum_value = 0;
int ci8div = ci / C8NUM, ci8mod = ci % C8NUM;
int32_t filter_zp = conv_param->conv_quant_arg_.filter_quant_args_[ci].zp_;
for (int di = 0; di < input_channel; di++) {
size_t di4div = di / C4NUM, di4mod = di % C4NUM;
int src_index = ri8div * C8NUM * ic4 + di4div * C8NUM * C4NUM + ri8mod * C4NUM + di4mod;
tmp_sum_value += input_value[src_index];
}
int dst_index = ci8div * C8NUM * hw8 + ri * C8NUM + ci8mod;
input_sum[dst_index] = tmp_sum_value * filter_zp;
}
}
}
return;
}
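
Both branches above precompute, for every packed input row (and per output channel in the per-channel case), the sum of the input values scaled by the filter zero point. The matmul can then subtract that single value instead of expanding (w - w_zp) in the inner loop, because of the identity checked below (standalone illustrative snippet, not repository code):

/* sum_d a[d]*(w[d] - w_zp) == sum_d a[d]*w[d] - w_zp * sum_d a[d],
 * so the correction term can be precomputed once per input row. */
#include <assert.h>
#include <stdint.h>

int main(void) {
  const int8_t a[3] = {12, -7, 33};
  const int8_t w[3] = {-2, 9, 4};
  const int32_t w_zp = 3;
  int32_t lhs = 0, dot = 0, a_sum = 0;
  for (int d = 0; d < 3; d++) {
    lhs += a[d] * (w[d] - w_zp);
    dot += a[d] * w[d];
    a_sum += a[d];
  }
  assert(lhs == dot - w_zp * a_sum);
  return 0;
}
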
void Im2ColPackUnitFp32(const float *input_data, ConvParameter *conv_param, float *packed_input, int real_cal_num,
int block_index) {
// input format : nhwc

@@ -35,15 +35,18 @@ void Im2ColPackUnitInt8(const int8_t *input_data, int8_t *packed_input, int real
void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int real_cal_num, int block_index,
int32_t *input_sum, ConvParameter *conv_param);
void PackInputSum16x4PerLater(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16);
void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16);
void Conv1x1InputPack(const void *src_ptr, void *dst_ptr, ConvParameter *conv_param, int data_size);
void Pack1x1WeightFp32(const float *weight_data, float *packed_weight, ConvParameter *conv_param);
void PackInputSum16x4Int8(int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
void PackInputSum16x4Int8(const int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
size_t plane_size, ConvParameter *conv_param);
void PackInputSum8x4Int8(const int8_t *input_value, int32_t *input_sum, size_t input_channel, size_t output_channel,
size_t plane_size, ConvParameter *conv_param);
void MatrixPack(const float *src, float *dst, int row, int ic4, int stride);
void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvParameter *conv_param);

@@ -40,8 +40,13 @@ class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel {
int ReSize() override;
int Run() override;
private:
int InitRunBuf();
void FreeRunBuf();
public:
int RunImpl(int task_id);
int RunPre(int task_id);
private:
void FreeResizeBuf();
@@ -58,7 +63,10 @@ class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel {
int8_t *output_ptr_ = nullptr;
size_t thread_count_ = 1;
size_t thread_stride_ = 0;
size_t thread_count_hw_ = 1;
size_t thread_stride_hw_ = 0;
bool pre_trans_input_ = false;
size_t input_sum_size = 0;
MatMulParameter *matmul_param_ = nullptr;
MATMUL_OPT_R_FUNC matmul_func_ = nullptr;
bool support_optimize_ = false;

@@ -398,11 +398,11 @@ kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::tensor::Ten
int dilation_h = conv_param->dilation_h_;
int dilation_w = conv_param->dilation_w_;
kernel::LiteKernel *kernel;
auto filter_quant_size = inputs[kWeightIndex]->GetQuantParams().size();
if (kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1) {
kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else if (kernel_h == 1 && kernel_w == 1) {
/* Convolution1x1Int8CPUKernel */
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else if (kernel_h == 1 && kernel_w == 1 && filter_quant_size == 1) {
kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else {
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
}
