fix bug in pool/conv/conv_transpose: UpdatePaddingAndDilation, _get_padding_with_SAME and conv2dtranspose_forward_naive. (#20997)

* fix bug in pool/conv/conv_transpose:
1. It should be strides[i], not strides[0], in UpdatePaddingAndDilation;
2. fix bug in func _get_padding_with_SAME in test_conv/conv_transpose_op.py;
3. fix bug in the computation of conv2dtranspose_forward_naive.
test=develop

* change test to make the data of different dimensions different. test=develop
liym27 committed by Aurelius84
parent 829bf871d7
commit 26a6e27afe
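
The core of the fix: the SAME algorithm computes the output size and total padding independently for each spatial dimension, so the ceil-division must use that dimension's own stride. Below is a minimal Python sketch of the corrected arithmetic (`same_padding` is a hypothetical name, mirroring the fixed C++ helper):

```python
def same_padding(data_dims, strides, ksize):
    """Per-dimension SAME padding, as computed by the fixed helper (sketch)."""
    paddings = []
    for i in range(len(data_dims)):
        # The bug: this used to divide by strides[0] for every dimension.
        out_size = (data_dims[i] + strides[i] - 1) // strides[i]
        pad_sum = max((out_size - 1) * strides[i] + ksize[i] - data_dims[i], 0)
        pad_0 = pad_sum // 2
        paddings += [pad_0, pad_sum - pad_0]
    return paddings

# Equal strides hide the bug; unequal strides expose it:
print(same_padding([5, 5], [1, 2], [4, 3]))  # [1, 2, 1, 1]; the old code gave [1, 2, 4, 4]
```

This is also why the tests below switch to unequal strides, filter sizes, and input extents per dimension: with symmetric settings, the buggy and fixed code paths produce identical results.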

@@ -83,10 +83,10 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
"Paddings size should be the same or twice as the input data size.");
}
- // when padding_desc is "VALID" or "SAME"
+ // when padding_algorithm is "VALID" or "SAME"
if (padding_algorithm == "SAME") {
for (size_t i = 0; i < data_dims.size(); ++i) {
- int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+ int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
int pad_sum =
std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
int pad_0 = pad_sum / 2;

@@ -18,6 +18,7 @@ limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/math/depthwise_conv.h"
@@ -77,47 +78,6 @@ static void Slice(const framework::ExecutionContext& context,
Slice<DeviceContext, T, D>(context, input, out, begin_vec, end_vec, axes_vec);
}
- inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
- std::vector<int>* dilation,
- const std::string padding_algorithm,
- const framework::DDim data_dims,
- const std::vector<int>& strides,
- const std::vector<int>& ksize) {
- // set padding size == data_dims.size() * 2
- auto data_shape = framework::vectorize<int>(data_dims);
- if (paddings->size() == data_dims.size()) {
- for (size_t i = 0; i < data_dims.size(); ++i) {
- int copy_pad = *(paddings->begin() + 2 * i);
- paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
- }
- } else {
- PADDLE_ENFORCE_EQ(
- data_dims.size() * 2, paddings->size(),
- "Paddings size should be the same or twice as the input data size.");
- }
- // when padding_algorithm is "VALID" or "SAME"
- if (padding_algorithm == "SAME") {
- for (size_t i = 0; i < data_dims.size(); ++i) {
- int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
- int pad_sum =
- std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
- int pad_0 = pad_sum / 2;
- int pad_1 = pad_sum - pad_0;
- *(paddings->begin() + i * 2) = pad_0;
- *(paddings->begin() + i * 2 + 1) = pad_1;
- // dilation
- *(dilation->begin() + i) = 1;
- }
- } else if (padding_algorithm == "VALID") {
- for (auto it = paddings->begin(); it != paddings->end(); it++) {
- *it = 0;
- }
- }
- }
// Define Op classes in .h file so that other conv transpose
// operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
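
The block deleted above duplicated the helper that conv_transpose_op.h now pulls in from conv_op.h (hence the new include). For reference, a rough Python transcription of what the shared helper does (a sketch, not Paddle API):

```python
def update_padding_and_dilation(paddings, dilations, algorithm,
                                data_dims, strides, ksize):
    """Rough Python transcription of the shared C++ helper (sketch only)."""
    # Expand per-dimension [p0, p1] padding into explicit [p0, p0, p1, p1] form.
    if len(paddings) == len(data_dims):
        paddings = [p for p in paddings for _ in (0, 1)]
    assert len(paddings) == 2 * len(data_dims), \
        "Paddings size should be the same or twice as the input data size."
    if algorithm == "SAME":
        for i in range(len(data_dims)):
            out_size = (data_dims[i] + strides[i] - 1) // strides[i]  # per-dim stride
            pad_sum = max((out_size - 1) * strides[i] + ksize[i] - data_dims[i], 0)
            paddings[2 * i] = pad_sum // 2
            paddings[2 * i + 1] = pad_sum - pad_sum // 2
            dilations[i] = 1  # SAME resets dilation
    elif algorithm == "VALID":
        paddings = [0] * len(paddings)
    return paddings, dilations
```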

@@ -66,7 +66,7 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
// set padding size == data_dims.size() * 2
auto data_shape = framework::vectorize<int>(data_dims);
if (paddings->size() == data_dims.size()) {
- for (int i = 0; i < data_dims.size(); ++i) {
+ for (size_t i = 0; i < data_dims.size(); ++i) {
int copy_pad = *(paddings->begin() + 2 * i);
paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
}
@@ -76,10 +76,10 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
"Paddings size should be the same or twice as the pooling size.");
}
- // when padding_desc is "VALID" or "SAME"
+ // when padding_algorithm is "VALID" or "SAME"
if (padding_algorithm == "SAME") {
for (int i = 0; i < data_dims.size(); ++i) {
- int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+ int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
int pad_sum =
std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
int pad_0 = pad_sum / 2;

@@ -73,11 +73,7 @@ def conv2d_forward_naive(input,
pad = [0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1]
- input_data_shape = []
- if data_format == "NCHW":
- input_data_shape = input.shape[2:4]
- elif data_format == "NHWC":
- input_data_shape = input.shape[1:3]
+ input_data_shape = input.shape[2:4]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_h_0, pad_h_1 = pad[0], pad[0]
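
The data_format branch could be dropped because, per the surrounding test code, the naive helper transposes channel-last input to channel-first near the top of the function, so by this point the spatial extent is always shape[2:4]; the old branch consulted the original layout and could pick the wrong dimensions, which is the _get_padding_with_SAME bug from the commit message. A tiny illustration of the invariant:

```python
import numpy as np

x_nhwc = np.zeros((2, 5, 7, 3))         # N, H, W, C
x = np.transpose(x_nhwc, [0, 3, 1, 2])  # -> N, C, H, W
assert x.shape[2:4] == (5, 7)           # spatial dims, whatever the original layout
```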
@@ -775,11 +771,11 @@ class TestConv2dOp_v2(OpTest):
def init_test_case(self):
self.pad = [0, 0]
- self.stride = [1, 1]
+ self.stride = [1, 2]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
- self.filter_size = [6, f_c, 3, 3]
+ self.filter_size = [6, f_c, 4, 3]
def init_dilation(self):
self.dilations = [1, 1]

@@ -59,12 +59,8 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0]
elif padding_algorithm == "SAME":
- dilation = [1, 1]
- input_data_shape = []
- if attrs['data_format'] == "NCHW":
- input_data_shape = input_.shape[2:4]
- elif attrs['data_format'] == "NHWC":
- input_data_shape = input_.shape[1:3]
+ dilations = [1, 1]
+ input_data_shape = input_.shape[2:4]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_h_0, pad_h_1 = pad[0], pad[0]
@@ -99,7 +95,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
axis=0)
i1, i2 = i * stride[0], i * stride[0] + d_bolck_h
- j1, j2 = j * stride[0], j * stride[0] + d_bolck_h
+ j1, j2 = j * stride[1], j * stride[1] + d_bolck_w
out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
dilations[1]] += tmp_out
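
This one-line fix matters because the width window must advance by the width stride and span the dilated filter width; the old code reused the height values for both axes. A self-contained toy version of the scatter step (single channel, hypothetical shapes, mirroring the fixed indexing):

```python
import numpy as np

def conv2d_transpose_naive(x, w, stride, dilations):
    """Toy single-channel transposed conv (sketch of the fixed test helper)."""
    in_h, in_w = x.shape
    f_h, f_w = w.shape
    d_bolck_h = dilations[0] * (f_h - 1) + 1   # dilated filter height
    d_bolck_w = dilations[1] * (f_w - 1) + 1   # dilated filter width
    out = np.zeros(((in_h - 1) * stride[0] + d_bolck_h,
                    (in_w - 1) * stride[1] + d_bolck_w))
    for i in range(in_h):
        for j in range(in_w):
            i1, i2 = i * stride[0], i * stride[0] + d_bolck_h
            # The fix: width indices use stride[1] and d_bolck_w;
            # the old code reused stride[0] and d_bolck_h here.
            j1, j2 = j * stride[1], j * stride[1] + d_bolck_w
            out[i1:i2:dilations[0], j1:j2:dilations[1]] += x[i, j] * w
    return out

y = conv2d_transpose_naive(np.ones((2, 3)), np.ones((3, 2)),
                           stride=[2, 1], dilations=[1, 2])
print(y.shape)  # (5, 5)
```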
@@ -231,12 +227,12 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp):
class TestWithSAMEPad(TestConv2dTransposeOp):
def init_test_case(self):
- self.stride = [1, 1]
- self.dilations = [1, 1]
+ self.stride = [2, 1]
+ self.dilations = [1, 2]
self.groups = 1
- self.input_size = [2, 3, 5, 5] # NCHW
+ self.input_size = [2, 3, 6, 5] # NCHW
f_c = self.input_size[1]
- self.filter_size = [f_c, 6, 3, 3]
+ self.filter_size = [f_c, 6, 4, 3]
self.padding_algorithm = 'SAME'
@@ -429,7 +425,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
def init_test_case(self):
self.pad = [1, 0, 1, 2]
- self.stride = [1, 1]
+ self.stride = [1, 2]
self.groups = 1
self.dilations = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW

@@ -75,11 +75,7 @@ def conv3d_forward_naive(input,
pad = [0, 0, 0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1, 1]
- input_data_shape = []
- if data_format == "NCDHW":
- input_data_shape = input.shape[2:5]
- elif data_format == "NDHWC":
- input_data_shape = input.shape[1:4]
+ input_data_shape = input.shape[2:5]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -597,11 +593,36 @@ class TestConv3dOp_2(OpTest):
class TestConv3dOp_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 2]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_paddings(self):
self.pad = [1, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
+ class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2):
+ def init_test_case(self):
+ self.stride = [1, 1, 2]
+ self.input_size = [2, 3, 4, 5, 5] # NCDHW
+ assert np.mod(self.input_size[1], self.groups) == 0
+ f_c = self.input_size[1] // self.groups
+ self.filter_size = [6, f_c, 3, 4, 3]
+ def init_paddings(self):
+ self.pad = [1, 0, 1, 0, 0, 2]
+ self.padding_algorithm = "EXPLICIT"
+ create_test_padding_SAME_class(TestConv3dOp_DiffDataInDiffDim)
+ create_test_padding_VALID_class(TestConv3dOp_DiffDataInDiffDim)
+ create_test_channel_last_class(TestConv3dOp_DiffDataInDiffDim)
class TestCase1_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
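
The new TestConv3dOp_DiffDataInDiffDim deliberately uses different extents, filter sizes, and strides in each of D, H, and W, so any index mix-up changes the intermediate arithmetic. A quick check of the expected output extents with the standard formula (hypothetical helper name, assuming the pad list is ordered [d0, d1, h0, h1, w0, w1]):

```python
def conv_out_size(in_size, k, pad_0, pad_1, stride, dilation=1):
    """Standard convolution output-size formula (sketch)."""
    return (in_size + pad_0 + pad_1 - (dilation * (k - 1) + 1)) // stride + 1

# TestConv3dOp_DiffDataInDiffDim: DHW input (4, 5, 5), filter (3, 4, 3),
# stride (1, 1, 2), pad [1, 0, 1, 0, 0, 2]
dims = [(4, 3, 1, 0, 1), (5, 4, 1, 0, 1), (5, 3, 0, 2, 2)]
print([conv_out_size(*d) for d in dims])  # [3, 3, 3]
```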

@@ -58,12 +58,8 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0, 0, 0]
elif padding_algorithm == "SAME":
- dilation = [1, 1, 1]
- input_data_shape = []
- if attrs['data_format'] == "NCHW":
- input_data_shape = input_.shape[2:5]
- elif attrs['data_format'] == "NHWC":
- input_data_shape = input_.shape[1:4]
+ dilations = [1, 1, 1]
+ input_data_shape = input_.shape[2:5]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -226,23 +222,23 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp):
class TestWithSAMEPad(TestConv3dTransposeOp):
def init_test_case(self):
- self.stride = [1, 1, 1]
- self.dilations = [1, 1, 1]
+ self.stride = [1, 1, 2]
+ self.dilations = [1, 2, 1]
self.groups = 1
- self.input_size = [2, 3, 5, 5, 5] # NCDHW
+ self.input_size = [2, 3, 5, 5, 6] # NCDHW
f_c = self.input_size[1]
- self.filter_size = [f_c, 6, 3, 3, 3]
+ self.filter_size = [f_c, 6, 3, 3, 4]
self.padding_algorithm = 'SAME'
class TestWithVALIDPad(TestConv3dTransposeOp):
def init_test_case(self):
- self.stride = [1, 1, 1]
+ self.stride = [2, 1, 1]
self.dilations = [1, 1, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5, 5] # NCDHW
f_c = self.input_size[1]
- self.filter_size = [f_c, 6, 3, 3, 3]
+ self.filter_size = [f_c, 6, 3, 4, 3]
self.padding_algorithm = 'VALID'
@@ -398,12 +394,12 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
"core is not compiled with CUDA")
class TestCUDNNWithSAMEPad(TestWithSAMEPad):
def init_test_case(self):
- self.stride = [1, 1, 1]
- self.dilations = [1, 1, 1]
+ self.stride = [1, 1, 2]
+ self.dilations = [1, 2, 1]
self.groups = 1
self.input_size = [2, 3, 5, 5, 5] # NCDHW
f_c = self.input_size[1]
- self.filter_size = [f_c, 6, 3, 3, 3]
+ self.filter_size = [f_c, 6, 3, 4, 3]
self.padding_algorithm = 'SAME'
def init_op_type(self):

@@ -950,6 +950,20 @@ create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
create_test_cudnn_padding_VALID_class(TestCase5_channel_last)
+ class TestCase1_strides(TestCase1):
+ def init_test_case(self):
+ self.ksize = [3, 3]
+ self.strides = [1, 2]
+ def init_shape(self):
+ self.shape = [2, 3, 4, 5]
+ create_test_cudnn_class(TestCase1_strides)
+ create_test_padding_SAME_class(TestCase1_strides)
+ create_test_cudnn_padding_SAME_class(TestCase1_strides)
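
For the new TestCase1_strides configuration, SAME pooling with unequal strides produces different output extents per axis, which the old strides[0] code got wrong for the width. A quick check with the ceil-division rule:

```python
import math

shape, strides = [2, 3, 4, 5], [1, 2]  # NCHW input, ksize [3, 3]
out_hw = [math.ceil(d / s) for d, s in zip(shape[2:], strides)]
print(out_hw)  # [4, 3]; the old strides[0] bug over-padded W and yielded 6
```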
# ----- test API
class TestPool2dAPI(OpTest):
def test_api(self):
